diff --git a/Dockerfile b/Dockerfile index 9af90c49ba0..d53e74af0a4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,6 +16,8 @@ COPY --from=builder /go/src/github.com/openshift/builder/openshift-builder /usr/ COPY imagecontent/policy.json /etc/containers/ COPY imagecontent/registries.conf /etc/containers/ COPY imagecontent/storage.conf /etc/containers/ +RUN mkdir /var/cache/blobs + RUN ln -s /usr/bin/openshift-builder /usr/bin/openshift-sti-build && \ ln -s /usr/bin/openshift-builder /usr/bin/openshift-docker-build && \ ln -s /usr/bin/openshift-builder /usr/bin/openshift-git-clone && \ diff --git a/Dockerfile-dev b/Dockerfile-dev index 535837f0817..293191bb5b5 100644 --- a/Dockerfile-dev +++ b/Dockerfile-dev @@ -15,6 +15,7 @@ RUN INSTALL_PKGS=" \ COPY imagecontent/policy.json /etc/containers/ COPY imagecontent/registries.conf /etc/containers/ COPY imagecontent/storage.conf /etc/containers/ +RUN mkdir /var/cache/blobs COPY openshift-builder /usr/bin RUN ln -s /usr/bin/openshift-builder /usr/bin/openshift-sti-build && \ diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7 index 6a010e40e4a..19442b1dcb3 100644 --- a/Dockerfile.rhel7 +++ b/Dockerfile.rhel7 @@ -16,6 +16,8 @@ COPY --from=builder /go/src/github.com/openshift/builder/openshift-builder /usr/ COPY imagecontent/policy.json /etc/containers/ COPY imagecontent/registries.conf /etc/containers/ COPY imagecontent/storage.conf /etc/containers/ +RUN mkdir /var/cache/blobs + RUN ln -s /usr/bin/openshift-builder /usr/bin/openshift-sti-build && \ ln -s /usr/bin/openshift-builder /usr/bin/openshift-docker-build && \ ln -s /usr/bin/openshift-builder /usr/bin/openshift-git-clone && \ diff --git a/glide.lock b/glide.lock index 1675109f340..d6aae8e3ab1 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 178ee381d8a978c1bb95cc7591706fb2a3de2b55642c9fb62d7f03f83b87b2f8 -updated: 2018-11-27T14:07:43.127548898-05:00 +hash: a2d41560bae11089032aaef05703c4451526a1d7286a2bf895d9504a8a45fa05 +updated: 2019-01-08T09:56:22.257698985-05:00 imports: - name: github.com/Azure/go-ansiterm version: 19f72df4d05d31cbe1c56bfc8045c96babff6c7e @@ -7,6 +7,8 @@ imports: - winterm - name: github.com/blang/semver version: b38d23b8782a487059e8fc8773e9a5b228a77cb6 +- name: github.com/boltdb/bolt + version: fd01fc79c553a8e99d512a07e8e0c63d4a3ccfc5 - name: github.com/BurntSushi/toml version: 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 - name: github.com/certifi/gocertifi @@ -25,17 +27,17 @@ imports: - pkg/types/current - pkg/version - name: github.com/containers/buildah - version: 412617652af33064d599feb86bdf90af342c686c + version: e7ca330f923701dba8859f5c014d0a9a3f7f0a49 subpackages: - bind - chroot - docker - imagebuildah + - pkg/blobcache - unshare - util - name: github.com/containers/image - version: c15806b373f71526b451bcdd26996d7bdf6fb205 - repo: https://github.com/nalind/image + version: f0cbc16b444d729362c78244c715b45bf4019b71 subpackages: - copy - directory @@ -54,6 +56,7 @@ imports: - oci/layout - openshift - ostree + - pkg/blobinfocache - pkg/compression - pkg/docker/config - pkg/strslice @@ -74,11 +77,12 @@ imports: - pkg/rootless - pkg/secrets - name: github.com/containers/storage - version: c12292897a26de8bb20be9cfbee00a9d8ebad1b3 + version: 2e0e1a581257fb1ab2b2298648bc7573d0b4f880 subpackages: - drivers - drivers/aufs - drivers/btrfs + - drivers/copy - drivers/devmapper - drivers/overlay - drivers/overlayutils @@ -241,7 +245,7 @@ imports: - compiler - extensions - name: github.com/gorilla/mux - version: 3d80bc801bb034e17cae38591335b3b1110f1c47 + version: 
f3ff42f93a451d7ffb2ff11cb9485f3f88089c83 - name: github.com/gregjones/httpcache version: 787624de3eb7bd915c329cba748687a3b22666a6 subpackages: @@ -260,6 +264,14 @@ imports: version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 - name: github.com/json-iterator/go version: f2b4162afba35581b6d4a50d3b8f34e33c144682 +- name: github.com/klauspost/compress + version: 30be6041bed523c18e269a700ebd9c2ea9328574 + subpackages: + - flate +- name: github.com/klauspost/cpuid + version: c640019ed4bda023bac884990967336803e39e3a +- name: github.com/klauspost/pgzip + version: 083b1c3f84dd6486588802e5ce295de3a7f41a8b - name: github.com/mailru/easyjson version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d subpackages: @@ -320,7 +332,7 @@ imports: - specerror - validate - name: github.com/opencontainers/selinux - version: 6ba084dd09db3dfe49a839bab0bbe97fd9274d80 + version: 51c6c0a5dbc675792e953298cb9871819d6f9bb8 subpackages: - go-selinux - go-selinux/label @@ -484,6 +496,10 @@ imports: - lex/httplex - proxy - websocket +- name: golang.org/x/sync + version: 37e7f081c4d4c64e13b10787722085407fe5d15f + subpackages: + - semaphore - name: golang.org/x/sys version: d641721ec2dead6fe5ca284096fe4b1fcd49e427 subpackages: diff --git a/glide.yaml b/glide.yaml index 2db6a0a5d0e..e6f5f5702b6 100644 --- a/glide.yaml +++ b/glide.yaml @@ -87,14 +87,13 @@ import: # we need some of the newer constants - package: golang.org/x/sys version: d641721ec2dead6fe5ca284096fe4b1fcd49e427 -- package: github.com/containers/storage - version: master # builds - package: github.com/containers/buildah version: master - package: github.com/containers/image - repo: https://github.com/nalind/image - version: ip-registry + version: master +- package: github.com/containers/storage + version: master # new enough to know how to disable itself when cross-compiling - package: github.com/opencontainers/selinux version: master diff --git a/pkg/build/builder/cmd/builder.go b/pkg/build/builder/cmd/builder.go index 513e6655dd0..cc91b6248fd 100644 --- a/pkg/build/builder/cmd/builder.go +++ b/pkg/build/builder/cmd/builder.go @@ -57,6 +57,7 @@ type builderConfig struct { buildsClient buildclientv1.BuildInterface cleanup func() store storage.Store + blobCache string } func newBuilderConfigFromEnvironment(out io.Writer, needsDocker bool) (*builderConfig, error) { @@ -131,7 +132,15 @@ func newBuilderConfigFromEnvironment(out io.Writer, needsDocker bool) (*builderC } istorage.Transport.SetStore(store) - dockerClient, err := bld.GetDaemonlessClient(systemContext, store, os.Getenv("BUILD_ISOLATION")) + // Default to using /var/cache/blobs as a blob cache, but allow its location + // to be changed by setting $BUILD_BLOBCACHE_DIR. Setting the location to an + // empty value disables the cache. 
+ cfg.blobCache = "/var/cache/blobs" + if blobCacheDir, isSet := os.LookupEnv("BUILD_BLOBCACHE_DIR"); isSet { + cfg.blobCache = blobCacheDir + } + + dockerClient, err := bld.GetDaemonlessClient(systemContext, store, os.Getenv("BUILD_ISOLATION"), cfg.blobCache) if err != nil { return nil, fmt.Errorf("no daemonless store: %v", err) } @@ -260,7 +269,7 @@ func (c *builderConfig) extractImageContent() error { }() buildDir := bld.InputContentPath - return bld.ExtractImageContent(ctx, c.dockerClient, c.store, buildDir, c.build) + return bld.ExtractImageContent(ctx, c.dockerClient, c.store, buildDir, c.build, c.blobCache) } // execute is responsible for running a build diff --git a/pkg/build/builder/daemonless.go b/pkg/build/builder/daemonless.go index 4c80b6dee52..aeb0e32f115 100644 --- a/pkg/build/builder/daemonless.go +++ b/pkg/build/builder/daemonless.go @@ -15,7 +15,6 @@ import ( "github.com/containers/buildah/imagebuildah" "github.com/containers/buildah/util" "github.com/containers/image/pkg/docker/config" - "github.com/containers/image/pkg/sysregistriesv2" "github.com/containers/image/transports/alltransports" "github.com/containers/image/types" "github.com/containers/storage" @@ -29,7 +28,7 @@ import ( "github.com/openshift/library-go/pkg/image/reference" ) -func pullDaemonlessImage(sc types.SystemContext, store storage.Store, imageName string, authConfig docker.AuthConfiguration) error { +func pullDaemonlessImage(sc types.SystemContext, store storage.Store, imageName string, authConfig docker.AuthConfiguration, blobCacheDirectory string) error { glog.V(2).Infof("Asked to pull fresh copy of %q.", imageName) if imageName == "" { @@ -47,11 +46,6 @@ func pullDaemonlessImage(sc types.SystemContext, store storage.Store, imageName // } systemContext.AuthFilePath = "/tmp/config.json" - registries, err := sysregistriesv2.GetRegistries(&systemContext) - if err != nil { - return fmt.Errorf("error reading system registries configuration: %v", err) - } - ref, err := reference.Parse(imageName) if err != nil { return fmt.Errorf("error parsing image name %s: %v", ref, err) @@ -68,28 +62,17 @@ func pullDaemonlessImage(sc types.SystemContext, store storage.Store, imageName } } - if registry := sysregistriesv2.FindRegistry(imageName, registries); registry != nil { - if registry.Insecure { - glog.V(2).Infof("Registry %q is marked as insecure in the registries configuration.", registry.URL) - systemContext.DockerInsecureSkipTLSVerify = true - systemContext.OCIInsecureSkipTLSVerify = true - } else { - glog.V(2).Infof("Registry %q is marked as secure in the registries configuration.", registry.URL) - } - } else { - glog.V(2).Infof("Registry for %q is not present in the registries configuration, assuming it is secure.", imageName) - } - options := buildah.PullOptions{ ReportWriter: os.Stderr, Store: store, SystemContext: &systemContext, + BlobDirectory: blobCacheDirectory, } _, err = buildah.Pull(context.TODO(), "docker://"+imageName, options) return err } -func buildDaemonlessImage(sc types.SystemContext, store storage.Store, isolation buildah.Isolation, dir string, optimization buildapiv1.ImageOptimizationPolicy, opts *docker.BuildImageOptions) error { +func buildDaemonlessImage(sc types.SystemContext, store storage.Store, isolation buildah.Isolation, contextDir string, optimization buildapiv1.ImageOptimizationPolicy, opts *docker.BuildImageOptions, blobCacheDirectory string) error { glog.V(2).Infof("Building...") args := make(map[string]string) @@ -144,7 +127,7 @@ func buildDaemonlessImage(sc 
types.SystemContext, store storage.Store, isolation } options := imagebuildah.BuildOptions{ - ContextDirectory: dir, + ContextDirectory: contextDir, PullPolicy: pullPolicy, Isolation: isolation, TransientMounts: transientMounts, @@ -167,6 +150,7 @@ func buildDaemonlessImage(sc types.SystemContext, store storage.Store, isolation NoCache: opts.NoCache, RemoveIntermediateCtrs: opts.RmTmpContainer, ForceRmIntermediateCtrs: true, + BlobDirectory: blobCacheDirectory, } _, _, err := imagebuildah.BuildDockerfiles(opts.Context, store, options, opts.Dockerfile) @@ -229,7 +213,7 @@ func removeDaemonlessImage(sc types.SystemContext, store storage.Store, buildTag return nil } -func pushDaemonlessImage(sc types.SystemContext, store storage.Store, imageName string, authConfig docker.AuthConfiguration) (string, error) { +func pushDaemonlessImage(sc types.SystemContext, store storage.Store, imageName string, authConfig docker.AuthConfiguration, blobCacheDirectory string) (string, error) { glog.V(2).Infof("Pushing image %q from local storage.", imageName) if imageName == "" { @@ -257,26 +241,12 @@ func pushDaemonlessImage(sc types.SystemContext, store storage.Store, imageName glog.V(2).Infof("No authentication secret provided for pushing to registry.") } - registries, err := sysregistriesv2.GetRegistries(&systemContext) - if err != nil { - return "", fmt.Errorf("error reading system registries configuration: %v", err) - } - if registry := sysregistriesv2.FindRegistry(imageName, registries); registry != nil { - if registry.Insecure { - glog.V(2).Infof("Registry %q is marked as insecure in the registries configuration.", registry.URL) - systemContext.DockerInsecureSkipTLSVerify = true - systemContext.OCIInsecureSkipTLSVerify = true - } else { - glog.V(2).Infof("Registry %q is marked as secure in the registries configuration.", registry.URL) - } - } else { - glog.V(2).Infof("Registry for %q is not present in the registries configuration, assuming it is secure.", imageName) - } - options := buildah.PushOptions{ + Compression: archive.Gzip, ReportWriter: os.Stdout, Store: store, SystemContext: &systemContext, + BlobDirectory: blobCacheDirectory, } // TODO - do something with the digest @@ -369,7 +339,7 @@ func inspectDaemonlessImage(sc types.SystemContext, store storage.Store, name st // daemonlessRun mimics the 'docker run --rm' CLI command well enough. It creates and // starts a container and streams its logs. The container is removed after it terminates. -func daemonlessRun(ctx context.Context, store storage.Store, isolation buildah.Isolation, createOpts docker.CreateContainerOptions, attachOpts docker.AttachToContainerOptions) error { +func daemonlessRun(ctx context.Context, store storage.Store, isolation buildah.Isolation, createOpts docker.CreateContainerOptions, attachOpts docker.AttachToContainerOptions, blobCacheDirectory string) error { if createOpts.Config == nil { return fmt.Errorf("error calling daemonlessRun: expected a Config") } @@ -385,6 +355,7 @@ func daemonlessRun(ctx context.Context, store storage.Store, isolation buildah.I MemorySwap: createOpts.HostConfig.MemorySwap, CgroupParent: createOpts.HostConfig.CgroupParent, }, + PullBlobDirectory: blobCacheDirectory, } builder, err := buildah.NewBuilder(ctx, store, builderOptions) @@ -454,15 +425,16 @@ func downloadFromDaemonlessContainer(builder *buildah.Builder, id string, path s // DaemonlessClient is a daemonless DockerClient-like implementation. 
type DaemonlessClient struct { - SystemContext types.SystemContext - Store storage.Store - Isolation buildah.Isolation - builders map[string]*buildah.Builder + SystemContext types.SystemContext + Store storage.Store + Isolation buildah.Isolation + BlobCacheDirectory string + builders map[string]*buildah.Builder } // GetDaemonlessClient returns a valid implemenatation of the DockerClient // interface, or an error if the implementation couldn't be created. -func GetDaemonlessClient(systemContext types.SystemContext, store storage.Store, isolationSpec string) (client DockerClient, err error) { +func GetDaemonlessClient(systemContext types.SystemContext, store storage.Store, isolationSpec, blobCacheDirectory string) (client DockerClient, err error) { isolation := buildah.IsolationDefault switch strings.ToLower(isolationSpec) { case "chroot": @@ -476,16 +448,21 @@ func GetDaemonlessClient(systemContext types.SystemContext, store storage.Store, return nil, fmt.Errorf("unrecognized BUILD_ISOLATION setting %q", strings.ToLower(isolationSpec)) } + if blobCacheDirectory != "" { + glog.V(0).Infof("Caching blobs under %q.", blobCacheDirectory) + } + return &DaemonlessClient{ - SystemContext: systemContext, - Store: store, - Isolation: isolation, - builders: make(map[string]*buildah.Builder), + SystemContext: systemContext, + Store: store, + Isolation: isolation, + BlobCacheDirectory: blobCacheDirectory, + builders: make(map[string]*buildah.Builder), }, nil } func (d *DaemonlessClient) BuildImage(opts docker.BuildImageOptions) error { - return buildDaemonlessImage(d.SystemContext, d.Store, d.Isolation, opts.ContextDir, buildapiv1.ImageOptimizationNone, &opts) + return buildDaemonlessImage(d.SystemContext, d.Store, d.Isolation, opts.ContextDir, buildapiv1.ImageOptimizationNone, &opts, d.BlobCacheDirectory) } func (d *DaemonlessClient) PushImage(opts docker.PushImageOptions, auth docker.AuthConfiguration) (string, error) { @@ -493,7 +470,7 @@ func (d *DaemonlessClient) PushImage(opts docker.PushImageOptions, auth docker.A if opts.Tag != "" { imageName = imageName + ":" + opts.Tag } - return pushDaemonlessImage(d.SystemContext, d.Store, imageName, auth) + return pushDaemonlessImage(d.SystemContext, d.Store, imageName, auth, d.BlobCacheDirectory) } func (d *DaemonlessClient) RemoveImage(name string) error { @@ -502,8 +479,9 @@ func (d *DaemonlessClient) RemoveImage(name string) error { func (d *DaemonlessClient) CreateContainer(opts docker.CreateContainerOptions) (*docker.Container, error) { options := buildah.BuilderOptions{ - FromImage: opts.Config.Image, - Container: opts.Name, + FromImage: opts.Config.Image, + Container: opts.Name, + PullBlobDirectory: d.BlobCacheDirectory, } builder, err := buildah.NewBuilder(opts.Context, d.Store, options) if err != nil { @@ -556,7 +534,7 @@ func (d *DaemonlessClient) PullImage(opts docker.PullImageOptions, auth docker.A if opts.Tag != "" { imageName = imageName + ":" + opts.Tag } - return pullDaemonlessImage(d.SystemContext, d.Store, imageName, auth) + return pullDaemonlessImage(d.SystemContext, d.Store, imageName, auth, d.BlobCacheDirectory) } func (d *DaemonlessClient) TagImage(name string, opts docker.TagImageOptions) error { diff --git a/pkg/build/builder/daemonless_unsupported.go b/pkg/build/builder/daemonless_unsupported.go index 851821c4d27..97fc01aef27 100644 --- a/pkg/build/builder/daemonless_unsupported.go +++ b/pkg/build/builder/daemonless_unsupported.go @@ -56,6 +56,6 @@ func buildDaemonlessImage(sc types.SystemContext, store storage.Store, isolation } 
// GetDaemonlessClient returns an error. -func GetDaemonlessClient(systemContext types.SystemContext, store storage.Store, isolationSpec string) (client DockerClient, err error) { +func GetDaemonlessClient(systemContext types.SystemContext, store storage.Store, isolationSpec, blobCacheDirectory string) (client DockerClient, err error) { return nil, errors.New("building images without an engine not supported on this platform") } diff --git a/pkg/build/builder/source.go b/pkg/build/builder/source.go index a74a71e5a34..6ead72517c7 100644 --- a/pkg/build/builder/source.go +++ b/pkg/build/builder/source.go @@ -131,7 +131,7 @@ func ManageDockerfile(dir string, build *buildapiv1.Build) error { return nil } -func ExtractImageContent(ctx context.Context, dockerClient DockerClient, store storage.Store, dir string, build *buildapiv1.Build) error { +func ExtractImageContent(ctx context.Context, dockerClient DockerClient, store storage.Store, dir string, build *buildapiv1.Build, blobCacheDirectory string) error { os.MkdirAll(dir, 0777) forcePull := false switch { @@ -151,7 +151,7 @@ func ExtractImageContent(ctx context.Context, dockerClient DockerClient, store s if image.PullSecret == nil { imageSecretIndex = -1 } - err := extractSourceFromImage(ctx, dockerClient, store, image.From.Name, dir, imageSecretIndex, image.Paths, forcePull) + err := extractSourceFromImage(ctx, dockerClient, store, image.From.Name, dir, imageSecretIndex, image.Paths, forcePull, blobCacheDirectory) if err != nil { return err } @@ -394,7 +394,7 @@ func copyImageSourceFromFilesytem(sourceDir, destDir string) error { return nil } -func extractSourceFromImage(ctx context.Context, dockerClient DockerClient, store storage.Store, image, buildDir string, imageSecretIndex int, paths []buildapiv1.ImageSourcePath, forcePull bool) error { +func extractSourceFromImage(ctx context.Context, dockerClient DockerClient, store storage.Store, image, buildDir string, imageSecretIndex int, paths []buildapiv1.ImageSourcePath, forcePull bool, blobCacheDirectory string) error { glog.V(4).Infof("Extracting image source from image %s", image) pullPolicy := buildah.PullIfMissing @@ -428,7 +428,7 @@ func extractSourceFromImage(ctx context.Context, dockerClient DockerClient, stor // TODO remove this, get CAs+insecure registry config from host. 
systemContext.OCIInsecureSkipTLSVerify = true - systemContext.DockerInsecureSkipTLSVerify = true + systemContext.DockerInsecureSkipTLSVerify = types.NewOptionalBool(true) if auths != nil { for registry, ac := range auths.Configs { diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore new file mode 100644 index 00000000000..c7bd2b7a5b8 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/.gitignore @@ -0,0 +1,4 @@ +*.prof +*.test +*.swp +/bin/ diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE new file mode 100644 index 00000000000..004e77fe5d2 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile new file mode 100644 index 00000000000..e035e63adcd --- /dev/null +++ b/vendor/github.com/boltdb/bolt/Makefile @@ -0,0 +1,18 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt + +test: + @go test -v -cover . + @go test -v ./cmd/bolt + +.PHONY: fmt test diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md new file mode 100644 index 00000000000..fc2a1e04843 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/README.md @@ -0,0 +1,935 @@ +Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. 
+ +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## A message from the author + +> The original goal of Bolt was to provide a simple pure Go key/value store and to +> not bloat the code with extraneous features. To that end, the project has been +> a success. However, this limited scope also means that the project is complete. +> +> Maintaining an open source database requires an immense amount of time and energy. +> Changes to the code can have unintended and sometimes catastrophic effects so +> even simple changes require hours and hours of careful testing and validation. +> +> Unfortunately I no longer have the time or energy to continue this work. Bolt is +> in a stable state and has years of successful production use. As such, I feel that +> leaving it in its current state is the most prudent course of action. +> +> If you are interested in using a more featureful version of Bolt, I suggest that +> you look at the CoreOS fork called [bbolt](https://github.com/coreos/bbolt). + +- Ben Johnson ([@benbjohnson](https://twitter.com/benbjohnson)) + +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. 
+ db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. + +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine. +This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open. + + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... 
+} +fmt.Printf("Allocated ID %d\n", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err = tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `DB.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different than the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user.
+ // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. 
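+
+A hedged aside from the editor, not part of the upstream README: if you need time-based keys that stay byte-sortable, one option is to render them in a fixed-width, zero-padded UTC layout before storing them. The `sortableKey` helper and the "Events" bucket below are illustrative assumptions; the snippet assumes `db` is an open `*bolt.DB` and that the standard `time` package is imported.
+
+```go
+// sortableKey renders t as a fixed-width UTC string with a zero-padded
+// nanosecond fraction, so the resulting keys sort chronologically as bytes.
+func sortableKey(t time.Time) []byte {
+	return []byte(t.UTC().Format("2006-01-02T15:04:05.000000000Z"))
+}
+
+db.Update(func(tx *bolt.Tx) error {
+	b, err := tx.CreateBucketIfNotExists([]byte("Events"))
+	if err != nil {
+		return err
+	}
+	return b.Put(sortableKey(time.Now()), []byte("payload"))
+})
+```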
+ + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. + +One common use case is to backup over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can backup using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. 
+ +If you want to backup to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. + + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. To this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with a initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. 
These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. 
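+
+As a rough sketch added by the editor (not from the upstream README), the compare-and-swap case mentioned above can be handled by doing the read and the conditional write inside a single `DB.Update()`, since Bolt permits only one writer at a time and the closure commits atomically. The helper name and error messages are assumptions; it needs the `bytes` and `errors` packages in addition to `bolt`.
+
+```go
+// compareAndSwap sets key to newVal only if its current value equals oldVal.
+// Both the read and the write happen inside one serializable transaction.
+func compareAndSwap(db *bolt.DB, bucket, key, oldVal, newVal []byte) error {
+	return db.Update(func(tx *bolt.Tx) error {
+		b := tx.Bucket(bucket)
+		if b == nil {
+			return errors.New("bucket not found")
+		}
+		if !bytes.Equal(b.Get(key), oldVal) {
+			return errors.New("value changed, aborting swap")
+		}
+		return b.Put(key, newVal)
+	})
+}
+```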
+ + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets causes poor page utilization + once they become larger than the page size (typically 4KB). + +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. 
+ +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where they key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. 
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. 
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. +* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. +* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB +* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework. + +If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml new file mode 100644 index 00000000000..6e26e941d68 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/appveyor.yml @@ -0,0 +1,18 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\boltdb\bolt + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - go version + - go env + - go get -v -t ./... + +build_script: + - go test -v ./... diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go new file mode 100644 index 00000000000..820d533c15f --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_386.go @@ -0,0 +1,10 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go new file mode 100644 index 00000000000..98fafdb47d8 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go @@ -0,0 +1,10 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 00000000000..7e5cb4b9412 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 00000000000..b26d84f91ba --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,12 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 00000000000..2b676661409 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. 
+func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 00000000000..7058c3d734e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 00000000000..645ddc3edc2 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 00000000000..9331d9771eb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,12 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 00000000000..8c143bc5d19 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 00000000000..d7c39af9253 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 00000000000..cad62dda1e3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,89 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. 
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 00000000000..307bf2b3ee9 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. 
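The flock/funlock and mmap shims in the hunks above and below are what back Bolt's open-time file locking; to a caller they surface as `bolt.ErrTimeout` when another process already holds the lock. A minimal sketch of how a consumer typically opts into a bounded wait (the database path is illustrative, not from this change):

```go
package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// Wait up to one second for the advisory file lock before giving up.
	// If another process holds the exclusive lock, Open returns bolt.ErrTimeout.
	db, err := bolt.Open("/tmp/example.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```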
+func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 00000000000..b00fb0720a4 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. 
+ if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path + lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 00000000000..f50442523c3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 00000000000..0c5bf27463e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,777 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. 
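The constants added just above (`MaxKeySize`, `MaxValueSize`) are the hard limits that `Bucket.Put` enforces further down in this file, returning `ErrKeyRequired`, `ErrKeyTooLarge`, or `ErrValueTooLarge`. A small sketch of validating user-supplied data up front (the helper name is illustrative; Put performs the same checks itself):

```go
package example

import "github.com/boltdb/bolt"

// putChecked rejects keys/values that exceed Bolt's documented limits before
// attempting the write. Put enforces the same limits internally.
func putChecked(b *bolt.Bucket, key, value []byte) error {
	if len(key) == 0 {
		return bolt.ErrKeyRequired
	}
	if len(key) > bolt.MaxKeySize {
		return bolt.ErrKeyTooLarge
	}
	if len(value) > bolt.MaxValueSize {
		return bolt.ErrValueTooLarge
	}
	return b.Put(key, value)
}
```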
+type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. 
+ if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if v == nil { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. 
+ if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + _, _, flags := c.seek(key) + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. 
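The hunk above brings in the core `Bucket` read/write API: `Get`, `Put`, `Delete`, `Sequence`/`NextSequence`, and `ForEach`. For context while reviewing, the usual calling pattern wraps these in `DB.Update`/`DB.View` transactions; a minimal round-trip sketch (bucket and key names are illustrative):

```go
package example

import (
	"fmt"

	"github.com/boltdb/bolt"
)

func roundTrip(db *bolt.DB) error {
	// Writes happen inside a read-write transaction.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	}); err != nil {
		return err
	}

	// Reads use a read-only transaction; returned slices are only valid
	// until the transaction ends, so copy them if they must outlive it.
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return fmt.Errorf("bucket missing")
		}
		return b.ForEach(func(k, v []byte) error {
			fmt.Printf("%s=%s\n", k, v)
			return nil
		})
	})
}
```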
+func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. + lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. 
+ if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. 
+func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. 
+ BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/github.com/boltdb/bolt/bucket_test.go b/vendor/github.com/boltdb/bolt/bucket_test.go new file mode 100644 index 00000000000..cddbe271313 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket_test.go @@ -0,0 +1,1909 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "log" + "math/rand" + "os" + "strconv" + "strings" + "testing" + "testing/quick" + + "github.com/boltdb/bolt" +) + +// Ensure that a bucket that gets a non-existent key returns nil. +func TestBucket_Get_NonExistent(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatal("expected nil value") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can read a value that is not flushed yet. +func TestBucket_Get_FromNode(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket retrieved via Get() returns a nil. +func TestBucket_Get_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + + if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil { + t.Fatal("expected nil value") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a slice returned from a bucket has a capacity equal to its length. +// This also allows slices to be appended to since it will require a realloc by Go. +// +// https://github.com/boltdb/bolt/issues/544 +func TestBucket_Get_Capacity(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Write key to a bucket. 
+ if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("bucket")) + if err != nil { + return err + } + return b.Put([]byte("key"), []byte("val")) + }); err != nil { + t.Fatal(err) + } + + // Retrieve value and attempt to append to it. + if err := db.Update(func(tx *bolt.Tx) error { + k, v := tx.Bucket([]byte("bucket")).Cursor().First() + + // Verify capacity. + if len(k) != cap(k) { + t.Fatalf("unexpected key slice capacity: %d", cap(k)) + } else if len(v) != cap(v) { + t.Fatalf("unexpected value slice capacity: %d", cap(v)) + } + + // Ensure slice can be appended to without a segfault. + k = append(k, []byte("123")...) + v = append(v, []byte("123")...) + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can write a key/value. +func TestBucket_Put(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if !bytes.Equal([]byte("bar"), v) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can rewrite a key in the same transaction. +func TestBucket_Put_Repeat(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("baz")); err != nil { + t.Fatal(err) + } + + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if !bytes.Equal([]byte("baz"), value) { + t.Fatalf("unexpected value: %v", value) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can write a bunch of large values. +func TestBucket_Put_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + count, factor := 100, 200 + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 1; i < count; i++ { + if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i < count; i++ { + value := b.Get([]byte(strings.Repeat("0", i*factor))) + if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) { + t.Fatalf("unexpected value: %v", value) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a database can perform multiple large appends safely. 
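The capacity test above walks the bucket with a `Cursor` rather than `Get`. For reference, the two common cursor patterns, a full scan and a prefix scan, look like this (a sketch; bucket names and the `user:` prefix are illustrative):

```go
package example

import (
	"bytes"
	"fmt"

	"github.com/boltdb/bolt"
)

func scan(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil
		}
		c := b.Cursor()

		// Full scan in key order.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}

		// Prefix scan: seek to the prefix and stop once keys no longer match.
		prefix := []byte("user:")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	})
}
```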
+func TestDB_Put_VeryLarge(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + n, batchN := 400000, 200000 + ksize, vsize := 8, 500 + + db := MustOpenDB() + defer db.MustClose() + + for i := 0; i < n; i += batchN { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for j := 0; j < batchN; j++ { + k, v := make([]byte, ksize), make([]byte, vsize) + binary.BigEndian.PutUint32(k, uint32(i+j)) + if err := b.Put(k, v); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + } +} + +// Ensure that a setting a value on a key with a bucket value returns an error. +func TestBucket_Put_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b0, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := b0.Put([]byte("foo"), []byte("bar")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a setting a value while the transaction is closed returns an error. +func TestBucket_Put_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that setting a value on a read-only bucket returns an error. +func TestBucket_Put_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can delete an existing key. +func TestBucket_Delete(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a large set of keys will work correctly. 
+func TestBucket_Delete_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 100; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil { + t.Fatal(err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + if err := b.Delete([]byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + if v := b.Get([]byte(strconv.Itoa(i))); v != nil { + t.Fatalf("unexpected value: %v, i=%d", v, i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Deleting a very large list of keys will cause the freelist to use overflow. +func TestBucket_Delete_FreelistOverflow(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db := MustOpenDB() + defer db.MustClose() + + k := make([]byte, 16) + for i := uint64(0); i < 10000; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("0")) + if err != nil { + t.Fatalf("bucket error: %s", err) + } + + for j := uint64(0); j < 1000; j++ { + binary.BigEndian.PutUint64(k[:8], i) + binary.BigEndian.PutUint64(k[8:], j) + if err := b.Put(k, nil); err != nil { + t.Fatalf("put error: %s", err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + } + + // Delete all of them in one large transaction + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("0")) + c := b.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + if err := c.Delete(); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that accessing and updating nested buckets is ok across transactions. +func TestBucket_Nested(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + // Create a widgets bucket. + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + // Create a widgets/foo bucket. + _, err = b.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + // Create a widgets/bar key. + if err := b.Put([]byte("bar"), []byte("0000")); err != nil { + t.Fatal(err) + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Update widgets/bar. + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Cause a split. + if err := db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 10000; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Insert into widgets/foo/baz. + if err := db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Verify. 
+ if err := db.View(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) { + t.Fatalf("unexpected value: %v", v) + } + if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) { + t.Fatalf("unexpected value: %v", v) + } + for i := 0; i < 10000; i++ { + if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) { + t.Fatalf("unexpected value: %v", v) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket using Delete() returns an error. +func TestBucket_Delete_Bucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a key on a read-only bucket returns an error. +func TestBucket_Delete_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a deleting value while the transaction is closed returns an error. +func TestBucket_Delete_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that deleting a bucket causes nested buckets to be deleted. +func TestBucket_DeleteBucket_Nested(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + bar, err := foo.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. 
+func TestBucket_DeleteBucket_Nested2(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + bar, err := foo.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + widgets := tx.Bucket([]byte("widgets")) + if widgets == nil { + t.Fatal("expected widgets bucket") + } + + foo := widgets.Bucket([]byte("foo")) + if foo == nil { + t.Fatal("expected foo bucket") + } + + bar := foo.Bucket([]byte("bar")) + if bar == nil { + t.Fatal("expected bar bucket") + } + + if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) != nil { + t.Fatal("expected bucket to be deleted") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a child bucket with multiple pages causes all pages to get collected. +// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly. +func TestBucket_DeleteBucket_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1000; i++ { + if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a simple value retrieved via Bucket() returns a nil. +func TestBucket_Bucket_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil { + t.Fatal("expected nil bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that creating a bucket on an existing non-bucket key returns an error. +func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := widgets.CreateBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket on an existing non-bucket key returns an error. 
+func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure bucket can set and update its sequence number. +func TestBucket_Sequence(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + bkt, err := tx.CreateBucket([]byte("0")) + if err != nil { + t.Fatal(err) + } + + // Retrieve sequence. + if v := bkt.Sequence(); v != 0 { + t.Fatalf("unexpected sequence: %d", v) + } + + // Update sequence. + if err := bkt.SetSequence(1000); err != nil { + t.Fatal(err) + } + + // Read sequence again. + if v := bkt.Sequence(); v != 1000 { + t.Fatalf("unexpected sequence: %d", v) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify sequence in separate transaction. + if err := db.View(func(tx *bolt.Tx) error { + if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 { + t.Fatalf("unexpected sequence: %d", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can return an autoincrementing sequence. +func TestBucket_NextSequence(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + woojits, err := tx.CreateBucket([]byte("woojits")) + if err != nil { + t.Fatal(err) + } + + // Make sure sequence increments. + if seq, err := widgets.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 1 { + t.Fatalf("unexpecte sequence: %d", seq) + } + + if seq, err := widgets.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 2 { + t.Fatalf("unexpected sequence: %d", seq) + } + + // Buckets should be separate. + if seq, err := woojits.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 1 { + t.Fatalf("unexpected sequence: %d", 1) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket will persist an autoincrementing sequence even if its +// the only thing updated on the bucket. +// https://github.com/boltdb/bolt/issues/296 +func TestBucket_NextSequence_Persist(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + seq, err := tx.Bucket([]byte("widgets")).NextSequence() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } else if seq != 2 { + t.Fatalf("unexpected sequence: %d", seq) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that retrieving the next sequence on a read-only bucket returns an error. 
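The sequence tests above exercise `Sequence`, `SetSequence`, and `NextSequence`. `NextSequence` is typically used to mint auto-incrementing keys inside a read-write transaction, which is why the read-only case that follows must fail; a sketch (the helper name and 8-byte key encoding are illustrative):

```go
package example

import (
	"encoding/binary"

	"github.com/boltdb/bolt"
)

// nextID mints a monotonically increasing 8-byte key. NextSequence only
// succeeds on writable transactions, as the read-only test below asserts.
func nextID(b *bolt.Bucket) ([]byte, error) {
	seq, err := b.NextSequence()
	if err != nil {
		return nil, err
	}
	id := make([]byte, 8)
	binary.BigEndian.PutUint64(id, seq)
	return id, nil
}
```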
+func TestBucket_NextSequence_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + _, err := tx.Bucket([]byte("widgets")).NextSequence() + if err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that retrieving the next sequence for a bucket on a closed database return an error. +func TestBucket_NextSequence_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if _, err := b.NextSequence(); err != bolt.ErrTxClosed { + t.Fatal(err) + } +} + +// Ensure a user can loop over all key/value pairs in a bucket. +func TestBucket_ForEach(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0001")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0002")); err != nil { + t.Fatal(err) + } + + var index int + if err := b.ForEach(func(k, v []byte) error { + switch index { + case 0: + if !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } + case 1: + if !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0001")) { + t.Fatalf("unexpected value: %v", v) + } + case 2: + if !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0000")) { + t.Fatalf("unexpected value: %v", v) + } + } + index++ + return nil + }); err != nil { + t.Fatal(err) + } + + if index != 3 { + t.Fatalf("unexpected index: %d", index) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a database can stop iteration early. +func TestBucket_ForEach_ShortCircuit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0000")); err != nil { + t.Fatal(err) + } + + var index int + if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + index++ + if bytes.Equal(k, []byte("baz")) { + return errors.New("marker") + } + return nil + }); err == nil || err.Error() != "marker" { + t.Fatalf("unexpected error: %s", err) + } + if index != 2 { + t.Fatalf("unexpected index: %d", index) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that looping over a bucket on a closed database returns an error. 
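TestBucket_ForEach_ShortCircuit above stops iteration by returning a sentinel error from the ForEach callback and recognizing it at the call site. A hedged sketch of that pattern outside the tests follows; the path, bucket, and key names are example-only assumptions.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

// errStop is a sentinel used only to break out of ForEach early.
var errStop = errors.New("stop iteration")

func main() {
	db, err := bolt.Open("/tmp/foreach-example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed a few keys; ForEach visits them in sorted key order.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		for _, k := range []string{"bar", "baz", "foo"} {
			if err := b.Put([]byte(k), []byte("0000")); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Stop as soon as "baz" is seen; translate the sentinel back to nil.
	if err := db.View(func(tx *bolt.Tx) error {
		err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
			fmt.Printf("visited %s\n", k)
			if string(k) == "baz" {
				return errStop
			}
			return nil
		})
		if err == errStop {
			return nil
		}
		return err
	}); err != nil {
		log.Fatal(err)
	}
}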
+func TestBucket_ForEach_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + if err := b.ForEach(func(k, v []byte) error { return nil }); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that an error is returned when inserting with an empty key. +func TestBucket_Put_EmptyKey(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte(""), []byte("bar")); err != bolt.ErrKeyRequired { + t.Fatalf("unexpected error: %s", err) + } + if err := b.Put(nil, []byte("bar")); err != bolt.ErrKeyRequired { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that an error is returned when inserting with a key that's too large. +func TestBucket_Put_KeyTooLarge(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put(make([]byte, 32769), []byte("bar")); err != bolt.ErrKeyTooLarge { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that an error is returned when inserting a value that's too large. +func TestBucket_Put_ValueTooLarge(t *testing.T) { + // Skip this test on DroneCI because the machine is resource constrained. + if os.Getenv("DRONE") == "true" { + t.Skip("not enough RAM for test") + } + + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket can calculate stats. +func TestBucket_Stats(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Add bucket with fewer keys but one big value. 
+ bigKey := []byte("really-big-value") + for i := 0; i < 500; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("woojits")) + if err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + } + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("woojits")).Stats() + if stats.BranchPageN != 1 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 7 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 2 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 501 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 2 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } + + branchInuse := 16 // branch page header + branchInuse += 7 * 16 // branch elements + branchInuse += 7 * 3 // branch keys (6 3-byte keys) + if stats.BranchInuse != branchInuse { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } + + leafInuse := 7 * 16 // leaf page header + leafInuse += 501 * 16 // leaf elements + leafInuse += 500*3 + len(bigKey) // leaf keys + leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values + if stats.LeafInuse != leafInuse { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + // Only check allocations for 4KB pages. + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 4096 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 36864 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 0 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 0 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket with random insertion utilizes fill percentage correctly. +func TestBucket_Stats_RandomFill(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } else if os.Getpagesize() != 4096 { + t.Skip("invalid page size for test") + } + + db := MustOpenDB() + defer db.MustClose() + + // Add a set of values in random order. It will be the same random + // order so we can maintain consistency between test runs. 
+ var count int + rand := rand.New(rand.NewSource(42)) + for _, i := range rand.Perm(1000) { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("woojits")) + if err != nil { + t.Fatal(err) + } + b.FillPercent = 0.9 + for _, j := range rand.Perm(100) { + index := (j * 10000) + i + if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil { + t.Fatal(err) + } + count++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("woojits")).Stats() + if stats.KeyN != 100000 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } + + if stats.BranchPageN != 98 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.BranchInuse != 130984 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.BranchAlloc != 401408 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } + + if stats.LeafPageN != 3412 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.LeafInuse != 4742482 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } else if stats.LeafAlloc != 13975552 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket can calculate stats. +func TestBucket_Stats_Small(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. 
+ b, err := tx.CreateBucket([]byte("whozawhats")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 0 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 1 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 1 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 0 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 0 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 16+16+6 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestBucket_Stats_EmptyBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. + if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 0 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 0 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 1 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 0 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 0 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 16 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket can calculate stats. 
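The stats tests above pin exact page and byte counts for particular layouts and page sizes. In ordinary code the same information is read from the bolt.BucketStats value returned by Bucket.Stats inside a read transaction. The sketch below is illustrative only: the path and bucket name are invented, and the printed numbers depend on page size and data.

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/stats-example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed a bucket with a few keys so the stats are non-trivial.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
		if err != nil {
			return err
		}
		for i := 0; i < 100; i++ {
			if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte("value")); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Stats are computed per bucket and include any nested buckets.
	if err := db.View(func(tx *bolt.Tx) error {
		s := tx.Bucket([]byte("woojits")).Stats()
		fmt.Printf("keys=%d depth=%d leaf pages=%d leaf bytes in use=%d inline buckets=%d\n",
			s.KeyN, s.Depth, s.LeafPageN, s.LeafInuse, s.InlineBucketN)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}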
+func TestBucket_Stats_Nested(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 100; i++ { + if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil { + t.Fatal(err) + } + } + + bar, err := b.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 10; i++ { + if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + + baz, err := bar.CreateBucket([]byte("baz")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 10; i++ { + if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("foo")) + stats := b.Stats() + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 2 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 122 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 3 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } + + foo := 16 // foo (pghdr) + foo += 101 * 16 // foo leaf elements + foo += 100*2 + 100*2 // foo leaf key/values + foo += 3 + 16 // foo -> bar key/value + + bar := 16 // bar (pghdr) + bar += 11 * 16 // bar leaf elements + bar += 10 + 10 // bar leaf key/values + bar += 3 + 16 // bar -> baz key/value + + baz := 16 // baz (inline) (pghdr) + baz += 10 * 16 // baz leaf elements + baz += 10 + 10 // baz leaf key/values + + if stats.LeafInuse != foo+bar+baz { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 8192 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 3 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != baz { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a large bucket can calculate stats. +func TestBucket_Stats_Large(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db := MustOpenDB() + defer db.MustClose() + + var index int + for i := 0; i < 100; i++ { + // Add bucket with lots of keys. 
+ if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 1000; i++ { + if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil { + t.Fatal(err) + } + index++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("widgets")).Stats() + if stats.BranchPageN != 13 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 1196 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 100000 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 3 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 25257 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 2596916 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 53248 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 4898816 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 0 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 0 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can write random keys and values across multiple transactions. +func TestBucket_Put_Single(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + index := 0 + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + m := make(map[string][]byte) + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + for _, item := range items { + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { + panic("put error: " + err.Error()) + } + m[string(item.Key)] = item.Value + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify all key/values so far. + if err := db.View(func(tx *bolt.Tx) error { + i := 0 + for k, v := range m { + value := tx.Bucket([]byte("widgets")).Get([]byte(k)) + if !bytes.Equal(value, v) { + t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) + db.CopyTempFile() + t.FailNow() + } + i++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + index++ + return true + }, nil); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can insert multiple key/value pairs at once. +func TestBucket_Put_Multiple(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. 
+ if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify all items exist. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + value := b.Get(item.Key) + if !bytes.Equal(item.Value, value) { + db.CopyTempFile() + t.Fatalf("exp=%x; got=%x", item.Value, value) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + return true + }, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can delete all key/value pairs and return to a single leaf page. +func TestBucket_Delete_Quick(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Remove items one at a time and check consistency. + for _, item := range items { + if err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Delete(item.Key) + }); err != nil { + t.Fatal(err) + } + } + + // Anything before our deletion index should be nil. + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) + return nil + }); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + return true + }, qconfig()); err != nil { + t.Error(err) + } +} + +func ExampleBucket_Put() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a bucket. + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + + // Set the value "bar" for the key "foo". + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Read value back in a different read-only transaction. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value of 'foo' is: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value of 'foo' is: bar +} + +func ExampleBucket_Delete() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a bucket. 
+ b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + + // Set the value "bar" for the key "foo". + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + + // Retrieve the key back from the database and verify it. + value := b.Get([]byte("foo")) + fmt.Printf("The value of 'foo' was: %s\n", value) + + return nil + }); err != nil { + log.Fatal(err) + } + + // Delete the key in a different write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) + }); err != nil { + log.Fatal(err) + } + + // Retrieve the key again. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if value == nil { + fmt.Printf("The value of 'foo' is now: nil\n") + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value of 'foo' was: bar + // The value of 'foo' is now: nil +} + +func ExampleBucket_ForEach() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Insert data into a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } + + if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + return err + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + return err + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + return err + } + + // Iterate over items in sorted key order. + if err := b.ForEach(func(k, v []byte) error { + fmt.Printf("A %s is %s.\n", k, v) + return nil + }); err != nil { + return err + } + + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // A cat is lame. + // A dog is fun. + // A liger is awesome. +} diff --git a/vendor/github.com/boltdb/bolt/cmd/bolt/main.go b/vendor/github.com/boltdb/bolt/cmd/bolt/main.go new file mode 100644 index 00000000000..057eca50a2f --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cmd/bolt/main.go @@ -0,0 +1,1740 @@ +package main + +import ( + "bytes" + "encoding/binary" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" + "unsafe" + + "github.com/boltdb/bolt" +) + +var ( + // ErrUsage is returned when a usage message was printed and the process + // should simply exit with an error. + ErrUsage = errors.New("usage") + + // ErrUnknownCommand is returned when a CLI command is not specified. + ErrUnknownCommand = errors.New("unknown command") + + // ErrPathRequired is returned when the path to a Bolt database is not specified. + ErrPathRequired = errors.New("path required") + + // ErrFileNotFound is returned when a Bolt database does not exist. + ErrFileNotFound = errors.New("file not found") + + // ErrInvalidValue is returned when a benchmark reads an unexpected value. + ErrInvalidValue = errors.New("invalid value") + + // ErrCorrupt is returned when a checking a data file finds errors. + ErrCorrupt = errors.New("invalid value") + + // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly + // divided by the iteration count. 
+ ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") + + // ErrPageIDRequired is returned when a required page id is not specified. + ErrPageIDRequired = errors.New("page id required") + + // ErrPageNotFound is returned when specifying a page above the high water mark. + ErrPageNotFound = errors.New("page not found") + + // ErrPageFreed is returned when reading a page that has already been freed. + ErrPageFreed = errors.New("page freed") +) + +// PageHeaderSize represents the size of the bolt.page header. +const PageHeaderSize = 16 + +func main() { + m := NewMain() + if err := m.Run(os.Args[1:]...); err == ErrUsage { + os.Exit(2) + } else if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } +} + +// Main represents the main program execution. +type Main struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain returns a new instance of Main connect to the standard input/output. +func NewMain() *Main { + return &Main{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run executes the program. +func (m *Main) Run(args ...string) error { + // Require a command at the beginning. + if len(args) == 0 || strings.HasPrefix(args[0], "-") { + fmt.Fprintln(m.Stderr, m.Usage()) + return ErrUsage + } + + // Execute command. + switch args[0] { + case "help": + fmt.Fprintln(m.Stderr, m.Usage()) + return ErrUsage + case "bench": + return newBenchCommand(m).Run(args[1:]...) + case "check": + return newCheckCommand(m).Run(args[1:]...) + case "compact": + return newCompactCommand(m).Run(args[1:]...) + case "dump": + return newDumpCommand(m).Run(args[1:]...) + case "info": + return newInfoCommand(m).Run(args[1:]...) + case "page": + return newPageCommand(m).Run(args[1:]...) + case "pages": + return newPagesCommand(m).Run(args[1:]...) + case "stats": + return newStatsCommand(m).Run(args[1:]...) + default: + return ErrUnknownCommand + } +} + +// Usage returns the help message. +func (m *Main) Usage() string { + return strings.TrimLeft(` +Bolt is a tool for inspecting bolt databases. + +Usage: + + bolt command [arguments] + +The commands are: + + bench run synthetic benchmark against bolt + check verifies integrity of bolt database + compact copies a bolt database, compacting it in the process + info print basic info + help print this screen + pages print list of pages with their types + stats iterate over all pages and generate usage stats + +Use "bolt [command] -h" for more information about a command. +`, "\n") +} + +// CheckCommand represents the "check" command execution. +type CheckCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewCheckCommand returns a CheckCommand. +func newCheckCommand(m *Main) *CheckCommand { + return &CheckCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *CheckCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Perform consistency check. 
+ return db.View(func(tx *bolt.Tx) error { + var count int + ch := tx.Check() + loop: + for { + select { + case err, ok := <-ch: + if !ok { + break loop + } + fmt.Fprintln(cmd.Stdout, err) + count++ + } + } + + // Print summary of errors. + if count > 0 { + fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) + return ErrCorrupt + } + + // Notify user that database is valid. + fmt.Fprintln(cmd.Stdout, "OK") + return nil + }) +} + +// Usage returns the help message. +func (cmd *CheckCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt check PATH + +Check opens a database at PATH and runs an exhaustive check to verify that +all pages are accessible or are marked as freed. It also verifies that no +pages are double referenced. + +Verification errors will stream out as they are found and the process will +return after all pages have been checked. +`, "\n") +} + +// InfoCommand represents the "info" command execution. +type InfoCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewInfoCommand returns a InfoCommand. +func newInfoCommand(m *Main) *InfoCommand { + return &InfoCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *InfoCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open the database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Print basic database info. + info := db.Info() + fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) + + return nil +} + +// Usage returns the help message. +func (cmd *InfoCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt info PATH + +Info prints basic information about the Bolt database at PATH. +`, "\n") +} + +// DumpCommand represents the "dump" command execution. +type DumpCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// newDumpCommand returns a DumpCommand. +func newDumpCommand(m *Main) *DumpCommand { + return &DumpCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *DumpCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path and page id. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Read page ids. + pageIDs, err := atois(fs.Args()[1:]) + if err != nil { + return err + } else if len(pageIDs) == 0 { + return ErrPageIDRequired + } + + // Open database to retrieve page size. + pageSize, err := ReadPageSize(path) + if err != nil { + return err + } + + // Open database file handler. + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + // Print each page listed. + for i, pageID := range pageIDs { + // Print a separator. 
+ if i > 0 { + fmt.Fprintln(cmd.Stdout, "===============================================") + } + + // Print page to stdout. + if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil { + return err + } + } + + return nil +} + +// PrintPage prints a given page as hexadecimal. +func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { + const bytesPerLineN = 16 + + // Read page into buffer. + buf := make([]byte, pageSize) + addr := pageID * pageSize + if n, err := r.ReadAt(buf, int64(addr)); err != nil { + return err + } else if n != pageSize { + return io.ErrUnexpectedEOF + } + + // Write out to writer in 16-byte lines. + var prev []byte + var skipped bool + for offset := 0; offset < pageSize; offset += bytesPerLineN { + // Retrieve current 16-byte line. + line := buf[offset : offset+bytesPerLineN] + isLastLine := (offset == (pageSize - bytesPerLineN)) + + // If it's the same as the previous line then print a skip. + if bytes.Equal(line, prev) && !isLastLine { + if !skipped { + fmt.Fprintf(w, "%07x *\n", addr+offset) + skipped = true + } + } else { + // Print line as hexadecimal in 2-byte groups. + fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, + line[0:2], line[2:4], line[4:6], line[6:8], + line[8:10], line[10:12], line[12:14], line[14:16], + ) + + skipped = false + } + + // Save the previous line. + prev = line + } + fmt.Fprint(w, "\n") + + return nil +} + +// Usage returns the help message. +func (cmd *DumpCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt dump -page PAGEID PATH + +Dump prints a hexadecimal dump of a single page. +`, "\n") +} + +// PageCommand represents the "page" command execution. +type PageCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// newPageCommand returns a PageCommand. +func newPageCommand(m *Main) *PageCommand { + return &PageCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *PageCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path and page id. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Read page ids. + pageIDs, err := atois(fs.Args()[1:]) + if err != nil { + return err + } else if len(pageIDs) == 0 { + return ErrPageIDRequired + } + + // Open database file handler. + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + // Print each page listed. + for i, pageID := range pageIDs { + // Print a separator. + if i > 0 { + fmt.Fprintln(cmd.Stdout, "===============================================") + } + + // Retrieve page info and page size. + p, buf, err := ReadPage(path, pageID) + if err != nil { + return err + } + + // Print basic page info. + fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id) + fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) + fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) + + // Print type-specific data. 
+		switch p.Type() {
+		case "meta":
+			err = cmd.PrintMeta(cmd.Stdout, buf)
+		case "leaf":
+			err = cmd.PrintLeaf(cmd.Stdout, buf)
+		case "branch":
+			err = cmd.PrintBranch(cmd.Stdout, buf)
+		case "freelist":
+			err = cmd.PrintFreelist(cmd.Stdout, buf)
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// PrintMeta prints the data from the meta page.
+func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
+	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+	fmt.Fprintf(w, "Version: %d\n", m.version)
+	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintLeaf prints the data for a leaf page.
+func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.leafPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		// Format value as string.
+		var v string
+		if (e.flags & uint32(bucketLeafFlag)) != 0 {
+			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
+			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+		} else if isPrintable(string(e.value())) {
+			v = fmt.Sprintf("%q", string(e.value()))
+		} else {
+			v = fmt.Sprintf("%x", string(e.value()))
+		}
+
+		fmt.Fprintf(w, "%s: %s\n", k, v)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintBranch prints the data for a leaf page.
+func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.branchPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintFreelist prints the data for a freelist page.
+func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each page in the freelist.
+	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
+	for i := uint16(0); i < p.count; i++ {
+		fmt.Fprintf(w, "%d\n", ids[i])
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintPage prints a given page as hexadecimal.
+func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
+	const bytesPerLineN = 16
+
+	// Read page into buffer.
+	buf := make([]byte, pageSize)
+	addr := pageID * pageSize
+	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
+		return err
+	} else if n != pageSize {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Write out to writer in 16-byte lines.
+	var prev []byte
+	var skipped bool
+	for offset := 0; offset < pageSize; offset += bytesPerLineN {
+		// Retrieve current 16-byte line.
+ line := buf[offset : offset+bytesPerLineN] + isLastLine := (offset == (pageSize - bytesPerLineN)) + + // If it's the same as the previous line then print a skip. + if bytes.Equal(line, prev) && !isLastLine { + if !skipped { + fmt.Fprintf(w, "%07x *\n", addr+offset) + skipped = true + } + } else { + // Print line as hexadecimal in 2-byte groups. + fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, + line[0:2], line[2:4], line[4:6], line[6:8], + line[8:10], line[10:12], line[12:14], line[14:16], + ) + + skipped = false + } + + // Save the previous line. + prev = line + } + fmt.Fprint(w, "\n") + + return nil +} + +// Usage returns the help message. +func (cmd *PageCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt page -page PATH pageid [pageid...] + +Page prints one or more pages in human readable format. +`, "\n") +} + +// PagesCommand represents the "pages" command execution. +type PagesCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPagesCommand returns a PagesCommand. +func newPagesCommand(m *Main) *PagesCommand { + return &PagesCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *PagesCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer func() { _ = db.Close() }() + + // Write header. + fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") + fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") + + return db.Update(func(tx *bolt.Tx) error { + var id int + for { + p, err := tx.Page(id) + if err != nil { + return &PageError{ID: id, Err: err} + } else if p == nil { + break + } + + // Only display count and overflow if this is a non-free page. + var count, overflow string + if p.Type != "free" { + count = strconv.Itoa(p.Count) + if p.OverflowCount > 0 { + overflow = strconv.Itoa(p.OverflowCount) + } + } + + // Print table row. + fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) + + // Move to the next non-overflow page. + id += 1 + if p.Type != "free" { + id += p.OverflowCount + } + } + return nil + }) +} + +// Usage returns the help message. +func (cmd *PagesCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt pages PATH + +Pages prints a table of pages with their type (meta, leaf, branch, freelist). +Leaf and branch pages will show a key count in the "items" column while the +freelist will show the number of free pages in the "items" column. + +The "overflow" column shows the number of blocks that the page spills over +into. Normally there is no overflow but large keys and values can cause +a single page to take up multiple blocks. +`, "\n") +} + +// StatsCommand represents the "stats" command execution. +type StatsCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewStatsCommand returns a StatsCommand. +func newStatsCommand(m *Main) *StatsCommand { + return &StatsCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. 
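The stats command whose Run method follows aggregates bolt.BucketStats across every top-level bucket whose name matches a prefix, using BucketStats.Add. As an illustration only, here is a minimal sketch of the same aggregation done directly against an open database; the path and prefix are assumptions for the example.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/aggregate-example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	prefix := []byte("bench") // example-only prefix; an empty prefix matches every bucket

	if err := db.View(func(tx *bolt.Tx) error {
		var agg bolt.BucketStats
		var count int
		// Walk every top-level bucket and fold matching stats together,
		// which is what the stats command's Run method does below.
		if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
			if bytes.HasPrefix(name, prefix) {
				agg.Add(b.Stats())
				count++
			}
			return nil
		}); err != nil {
			return err
		}
		fmt.Printf("aggregated %d buckets: %d keys across %d leaf pages\n",
			count, agg.KeyN, agg.LeafPageN)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}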
+func (cmd *StatsCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path, prefix := fs.Arg(0), fs.Arg(1) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + return db.View(func(tx *bolt.Tx) error { + var s bolt.BucketStats + var count int + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + if bytes.HasPrefix(name, []byte(prefix)) { + s.Add(b.Stats()) + count += 1 + } + return nil + }); err != nil { + return err + } + + fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count) + + fmt.Fprintln(cmd.Stdout, "Page count statistics") + fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN) + fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) + fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN) + fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) + + fmt.Fprintln(cmd.Stdout, "Tree statistics") + fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN) + fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth) + + fmt.Fprintln(cmd.Stdout, "Page size utilization") + fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) + var percentage int + if s.BranchAlloc != 0 { + percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc)) + } + fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) + fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) + percentage = 0 + if s.LeafAlloc != 0 { + percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) + } + fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) + + fmt.Fprintln(cmd.Stdout, "Bucket statistics") + fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) + percentage = 0 + if s.BucketN != 0 { + percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) + } + fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) + percentage = 0 + if s.LeafInuse != 0 { + percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) + } + fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) + + return nil + }) +} + +// Usage returns the help message. +func (cmd *StatsCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt stats PATH + +Stats performs an extensive search of the database to track every page +reference. It starts at the current meta page and recursively iterates +through every accessible bucket. + +The following errors can be reported: + + already freed + The page is referenced more than once in the freelist. + + unreachable unfreed + The page is not referenced by a bucket or in the freelist. + + reachable freed + The page is referenced by a bucket but is also in the freelist. + + out of bounds + A page is referenced that is above the high water mark. 
+ + multiple references + A page is referenced by more than one other page. + + invalid type + The page type is not "meta", "leaf", "branch", or "freelist". + +No errors should occur in your database. However, if for some reason you +experience corruption, please submit a ticket to the Bolt project page: + + https://github.com/boltdb/bolt/issues +`, "\n") +} + +var benchBucketName = []byte("bench") + +// BenchCommand represents the "bench" command execution. +type BenchCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewBenchCommand returns a BenchCommand using the +func newBenchCommand(m *Main) *BenchCommand { + return &BenchCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the "bench" command. +func (cmd *BenchCommand) Run(args ...string) error { + // Parse CLI arguments. + options, err := cmd.ParseFlags(args) + if err != nil { + return err + } + + // Remove path if "-work" is not set. Otherwise keep path. + if options.Work { + fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) + } else { + defer os.Remove(options.Path) + } + + // Create database. + db, err := bolt.Open(options.Path, 0666, nil) + if err != nil { + return err + } + db.NoSync = options.NoSync + defer db.Close() + + // Write to the database. + var results BenchResults + if err := cmd.runWrites(db, options, &results); err != nil { + return fmt.Errorf("write: %v", err) + } + + // Read from the database. + if err := cmd.runReads(db, options, &results); err != nil { + return fmt.Errorf("bench: read: %s", err) + } + + // Print results. + fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) + fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) + fmt.Fprintln(os.Stderr, "") + return nil +} + +// ParseFlags parses the command line flags. +func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) { + var options BenchOptions + + // Parse flagset. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") + fs.StringVar(&options.WriteMode, "write-mode", "seq", "") + fs.StringVar(&options.ReadMode, "read-mode", "seq", "") + fs.IntVar(&options.Iterations, "count", 1000, "") + fs.IntVar(&options.BatchSize, "batch-size", 0, "") + fs.IntVar(&options.KeySize, "key-size", 8, "") + fs.IntVar(&options.ValueSize, "value-size", 32, "") + fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") + fs.StringVar(&options.MemProfile, "memprofile", "", "") + fs.StringVar(&options.BlockProfile, "blockprofile", "", "") + fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") + fs.BoolVar(&options.NoSync, "no-sync", false, "") + fs.BoolVar(&options.Work, "work", false, "") + fs.StringVar(&options.Path, "path", "", "") + fs.SetOutput(cmd.Stderr) + if err := fs.Parse(args); err != nil { + return nil, err + } + + // Set batch size to iteration size if not set. + // Require that batch size can be evenly divided by the iteration count. + if options.BatchSize == 0 { + options.BatchSize = options.Iterations + } else if options.Iterations%options.BatchSize != 0 { + return nil, ErrNonDivisibleBatchSize + } + + // Generate temp path if one is not passed in. 
+ if options.Path == "" { + f, err := ioutil.TempFile("", "bolt-bench-") + if err != nil { + return nil, fmt.Errorf("temp file: %s", err) + } + f.Close() + os.Remove(f.Name()) + options.Path = f.Name() + } + + return &options, nil +} + +// Writes to the database. +func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + // Start profiling for writes. + if options.ProfileMode == "rw" || options.ProfileMode == "w" { + cmd.startProfiling(options) + } + + t := time.Now() + + var err error + switch options.WriteMode { + case "seq": + err = cmd.runWritesSequential(db, options, results) + case "rnd": + err = cmd.runWritesRandom(db, options, results) + case "seq-nest": + err = cmd.runWritesSequentialNested(db, options, results) + case "rnd-nest": + err = cmd.runWritesRandomNested(db, options, results) + default: + return fmt.Errorf("invalid write mode: %s", options.WriteMode) + } + + // Save time to write. + results.WriteDuration = time.Since(t) + + // Stop profiling for writes only. + if options.ProfileMode == "w" { + cmd.stopProfiling() + } + + return err +} + +func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var i = uint32(0) + return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) +} + +func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) +} + +func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var i = uint32(0) + return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) +} + +func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) +} + +func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { + results.WriteOps = options.Iterations + + for i := 0; i < options.Iterations; i += options.BatchSize { + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists(benchBucketName) + b.FillPercent = options.FillPercent + + for j := 0; j < options.BatchSize; j++ { + key := make([]byte, options.KeySize) + value := make([]byte, options.ValueSize) + + // Write key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert key/value. + if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil +} + +func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { + results.WriteOps = options.Iterations + + for i := 0; i < options.Iterations; i += options.BatchSize { + if err := db.Update(func(tx *bolt.Tx) error { + top, err := tx.CreateBucketIfNotExists(benchBucketName) + if err != nil { + return err + } + top.FillPercent = options.FillPercent + + // Create bucket key. + name := make([]byte, options.KeySize) + binary.BigEndian.PutUint32(name, keySource()) + + // Create bucket. 
+ b, err := top.CreateBucketIfNotExists(name) + if err != nil { + return err + } + b.FillPercent = options.FillPercent + + for j := 0; j < options.BatchSize; j++ { + var key = make([]byte, options.KeySize) + var value = make([]byte, options.ValueSize) + + // Generate key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert value into subbucket. + if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil +} + +// Reads from the database. +func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + // Start profiling for reads. + if options.ProfileMode == "r" { + cmd.startProfiling(options) + } + + t := time.Now() + + var err error + switch options.ReadMode { + case "seq": + switch options.WriteMode { + case "seq-nest", "rnd-nest": + err = cmd.runReadsSequentialNested(db, options, results) + default: + err = cmd.runReadsSequential(db, options, results) + } + default: + return fmt.Errorf("invalid read mode: %s", options.ReadMode) + } + + // Save read time. + results.ReadDuration = time.Since(t) + + // Stop profiling for reads. + if options.ProfileMode == "rw" || options.ProfileMode == "r" { + cmd.stopProfiling() + } + + return err +} + +func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + + c := tx.Bucket(benchBucketName).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return errors.New("invalid value") + } + count++ + } + + if options.WriteMode == "seq" && count != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + var top = tx.Bucket(benchBucketName) + if err := top.ForEach(func(name, _ []byte) error { + c := top.Bucket(name).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return ErrInvalidValue + } + count++ + } + return nil + }); err != nil { + return err + } + + if options.WriteMode == "seq-nest" && count != options.Iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +// File handlers for the various profiles. +var cpuprofile, memprofile, blockprofile *os.File + +// Starts all profiles set on the options. +func (cmd *BenchCommand) startProfiling(options *BenchOptions) { + var err error + + // Start CPU profiling. + if options.CPUProfile != "" { + cpuprofile, err = os.Create(options.CPUProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) + os.Exit(1) + } + pprof.StartCPUProfile(cpuprofile) + } + + // Start memory profiling. 
+ if options.MemProfile != "" { + memprofile, err = os.Create(options.MemProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) + os.Exit(1) + } + runtime.MemProfileRate = 4096 + } + + // Start fatal profiling. + if options.BlockProfile != "" { + blockprofile, err = os.Create(options.BlockProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) + os.Exit(1) + } + runtime.SetBlockProfileRate(1) + } +} + +// Stops all profiles. +func (cmd *BenchCommand) stopProfiling() { + if cpuprofile != nil { + pprof.StopCPUProfile() + cpuprofile.Close() + cpuprofile = nil + } + + if memprofile != nil { + pprof.Lookup("heap").WriteTo(memprofile, 0) + memprofile.Close() + memprofile = nil + } + + if blockprofile != nil { + pprof.Lookup("block").WriteTo(blockprofile, 0) + blockprofile.Close() + blockprofile = nil + runtime.SetBlockProfileRate(0) + } +} + +// BenchOptions represents the set of options that can be passed to "bolt bench". +type BenchOptions struct { + ProfileMode string + WriteMode string + ReadMode string + Iterations int + BatchSize int + KeySize int + ValueSize int + CPUProfile string + MemProfile string + BlockProfile string + StatsInterval time.Duration + FillPercent float64 + NoSync bool + Work bool + Path string +} + +// BenchResults represents the performance results of the benchmark. +type BenchResults struct { + WriteOps int + WriteDuration time.Duration + ReadOps int + ReadDuration time.Duration +} + +// Returns the duration for a single write operation. +func (r *BenchResults) WriteOpDuration() time.Duration { + if r.WriteOps == 0 { + return 0 + } + return r.WriteDuration / time.Duration(r.WriteOps) +} + +// Returns average number of write operations that can be performed per second. +func (r *BenchResults) WriteOpsPerSecond() int { + var op = r.WriteOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +// Returns the duration for a single read operation. +func (r *BenchResults) ReadOpDuration() time.Duration { + if r.ReadOps == 0 { + return 0 + } + return r.ReadDuration / time.Duration(r.ReadOps) +} + +// Returns average number of read operations that can be performed per second. +func (r *BenchResults) ReadOpsPerSecond() int { + var op = r.ReadOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +type PageError struct { + ID int + Err error +} + +func (e *PageError) Error() string { + return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) +} + +// isPrintable returns true if the string is valid unicode and contains only printable runes. +func isPrintable(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, ch := range s { + if !unicode.IsPrint(ch) { + return false + } + } + return true +} + +// ReadPage reads page info & full page data from a path. +// This is not transactionally safe. +func ReadPage(path string, pageID int) (*page, []byte, error) { + // Find page size. + pageSize, err := ReadPageSize(path) + if err != nil { + return nil, nil, fmt.Errorf("read page size: %s", err) + } + + // Open database file. + f, err := os.Open(path) + if err != nil { + return nil, nil, err + } + defer f.Close() + + // Read one block into buffer. 
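+ // A single page is enough to read the header, which says how many overflow pages follow.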
+ buf := make([]byte, pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + + // Determine total number of blocks. + p := (*page)(unsafe.Pointer(&buf[0])) + overflowN := p.overflow + + // Re-read entire page (with overflow) into buffer. + buf = make([]byte, (int(overflowN)+1)*pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + p = (*page)(unsafe.Pointer(&buf[0])) + + return p, buf, nil +} + +// ReadPageSize reads page size a path. +// This is not transactionally safe. +func ReadPageSize(path string) (int, error) { + // Open database file. + f, err := os.Open(path) + if err != nil { + return 0, err + } + defer f.Close() + + // Read 4KB chunk. + buf := make([]byte, 4096) + if _, err := io.ReadFull(f, buf); err != nil { + return 0, err + } + + // Read page size from metadata. + m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) + return int(m.pageSize), nil +} + +// atois parses a slice of strings into integers. +func atois(strs []string) ([]int, error) { + var a []int + for _, str := range strs { + i, err := strconv.Atoi(str) + if err != nil { + return nil, err + } + a = append(a, i) + } + return a, nil +} + +// DO NOT EDIT. Copied from the "bolt" package. +const maxAllocSize = 0xFFFFFFF + +// DO NOT EDIT. Copied from the "bolt" package. +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +// DO NOT EDIT. Copied from the "bolt" package. +const bucketLeafFlag = 0x01 + +// DO NOT EDIT. Copied from the "bolt" package. +type pgid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type txid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type bucket struct { + root pgid + sequence uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) Type() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// DO NOT EDIT. 
Copied from the "bolt" package. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] +} + +// CompactCommand represents the "compact" command execution. +type CompactCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + + SrcPath string + DstPath string + TxMaxSize int64 +} + +// newCompactCommand returns a CompactCommand. +func newCompactCommand(m *Main) *CompactCommand { + return &CompactCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *CompactCommand) Run(args ...string) (err error) { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.SetOutput(ioutil.Discard) + fs.StringVar(&cmd.DstPath, "o", "", "") + fs.Int64Var(&cmd.TxMaxSize, "tx-max-size", 65536, "") + if err := fs.Parse(args); err == flag.ErrHelp { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } else if err != nil { + return err + } else if cmd.DstPath == "" { + return fmt.Errorf("output file required") + } + + // Require database paths. + cmd.SrcPath = fs.Arg(0) + if cmd.SrcPath == "" { + return ErrPathRequired + } + + // Ensure source file exists. + fi, err := os.Stat(cmd.SrcPath) + if os.IsNotExist(err) { + return ErrFileNotFound + } else if err != nil { + return err + } + initialSize := fi.Size() + + // Open source database. + src, err := bolt.Open(cmd.SrcPath, 0444, nil) + if err != nil { + return err + } + defer src.Close() + + // Open destination database. + dst, err := bolt.Open(cmd.DstPath, fi.Mode(), nil) + if err != nil { + return err + } + defer dst.Close() + + // Run compaction. + if err := cmd.compact(dst, src); err != nil { + return err + } + + // Report stats on new size. + fi, err = os.Stat(cmd.DstPath) + if err != nil { + return err + } else if fi.Size() == 0 { + return fmt.Errorf("zero db size") + } + fmt.Fprintf(cmd.Stdout, "%d -> %d bytes (gain=%.2fx)\n", initialSize, fi.Size(), float64(initialSize)/float64(fi.Size())) + + return nil +} + +func (cmd *CompactCommand) compact(dst, src *bolt.DB) error { + // commit regularly, or we'll run out of memory for large datasets if using one transaction. + var size int64 + tx, err := dst.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { + // On each key/value, check if we have exceeded tx size. + sz := int64(len(k) + len(v)) + if size+sz > cmd.TxMaxSize && cmd.TxMaxSize != 0 { + // Commit previous transaction. + if err := tx.Commit(); err != nil { + return err + } + + // Start new transaction. + tx, err = dst.Begin(true) + if err != nil { + return err + } + size = 0 + } + size += sz + + // Create bucket on the root transaction if this is the first level. + nk := len(keys) + if nk == 0 { + bkt, err := tx.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Create buckets on subsequent levels, if necessary. + b := tx.Bucket(keys[0]) + if nk > 1 { + for _, k := range keys[1:] { + b = b.Bucket(k) + } + } + + // If there is no value then this is a bucket call. 
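+ // walkFunc reports buckets with a nil value, so recreate the bucket and its sequence rather than writing a key.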
+ if v == nil { + bkt, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Otherwise treat it as a key/value pair. + return b.Put(k, v) + }); err != nil { + return err + } + + return tx.Commit() +} + +// walkFunc is the type of the function called for keys (buckets and "normal" +// values) discovered by Walk. keys is the list of keys to descend to the bucket +// owning the discovered key/value pair k/v. +type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error + +// walk walks recursively the bolt database db, calling walkFn for each key it finds. +func (cmd *CompactCommand) walk(db *bolt.DB, walkFn walkFunc) error { + return db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return cmd.walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func (cmd *CompactCommand) walkBucket(b *bolt.Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return cmd.walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return cmd.walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} + +// Usage returns the help message. +func (cmd *CompactCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt compact [options] -o DST SRC + +Compact opens a database at SRC path and walks it recursively, copying keys +as they are found from all buckets, to a newly created database at DST path. + +The original database is left untouched. + +Additional options include: + + -tx-max-size NUM + Specifies the maximum size of individual transactions. + Defaults to 64KB. +`, "\n") +} diff --git a/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go b/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go new file mode 100644 index 00000000000..0a11ff33957 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go @@ -0,0 +1,356 @@ +package main_test + +import ( + "bytes" + crypto "crypto/rand" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "strconv" + "testing" + + "github.com/boltdb/bolt" + "github.com/boltdb/bolt/cmd/bolt" +) + +// Ensure the "info" command can print information about a database. +func TestInfoCommand_Run(t *testing.T) { + db := MustOpen(0666, nil) + db.DB.Close() + defer db.Close() + + // Run the info command. + m := NewMain() + if err := m.Run("info", db.Path); err != nil { + t.Fatal(err) + } +} + +// Ensure the "stats" command executes correctly with an empty database. +func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { + // Ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + db := MustOpen(0666, nil) + defer db.Close() + db.DB.Close() + + // Generate expected result. 
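+ // The output must match the stats command byte for byte; the test is skipped above when the page size is not 4KB.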
+ exp := "Aggregate statistics for 0 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 0\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 0\n" + + "\tNumber of levels in B+tree: 0\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 0\n" + + "\tBytes actually used for leaf data: 0 (0%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 0\n" + + "\tTotal number on inlined buckets: 0 (0%)\n" + + "\tBytes used for inlined buckets: 0 (0%)\n" + + // Run the command. + m := NewMain() + if err := m.Run("stats", db.Path); err != nil { + t.Fatal(err) + } else if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +// Ensure the "stats" command can execute correctly. +func TestStatsCommand_Run(t *testing.T) { + // Ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + db := MustOpen(0666, nil) + defer db.Close() + + if err := db.Update(func(tx *bolt.Tx) error { + // Create "foo" bucket. + b, err := tx.CreateBucket([]byte("foo")) + if err != nil { + return err + } + for i := 0; i < 10; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "bar" bucket. + b, err = tx.CreateBucket([]byte("bar")) + if err != nil { + return err + } + for i := 0; i < 100; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "baz" bucket. + b, err = tx.CreateBucket([]byte("baz")) + if err != nil { + return err + } + if err := b.Put([]byte("key"), []byte("value")); err != nil { + return err + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.DB.Close() + + // Generate expected result. + exp := "Aggregate statistics for 3 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 1\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 111\n" + + "\tNumber of levels in B+tree: 1\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 4096\n" + + "\tBytes actually used for leaf data: 1996 (48%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 3\n" + + "\tTotal number on inlined buckets: 2 (66%)\n" + + "\tBytes used for inlined buckets: 236 (11%)\n" + + // Run the command. + m := NewMain() + if err := m.Run("stats", db.Path); err != nil { + t.Fatal(err) + } else if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +// Main represents a test wrapper for main.Main that records output. +type Main struct { + *main.Main + Stdin bytes.Buffer + Stdout bytes.Buffer + Stderr bytes.Buffer +} + +// NewMain returns a new instance of Main. +func NewMain() *Main { + m := &Main{Main: main.NewMain()} + m.Main.Stdin = &m.Stdin + m.Main.Stdout = &m.Stdout + m.Main.Stderr = &m.Stderr + return m +} + +// MustOpen creates a Bolt database in a temporary location. 
+func MustOpen(mode os.FileMode, options *bolt.Options) *DB { + // Create temporary path. + f, _ := ioutil.TempFile("", "bolt-") + f.Close() + os.Remove(f.Name()) + + db, err := bolt.Open(f.Name(), mode, options) + if err != nil { + panic(err.Error()) + } + return &DB{DB: db, Path: f.Name()} +} + +// DB is a test wrapper for bolt.DB. +type DB struct { + *bolt.DB + Path string +} + +// Close closes and removes the database. +func (db *DB) Close() error { + defer os.Remove(db.Path) + return db.DB.Close() +} + +func TestCompactCommand_Run(t *testing.T) { + var s int64 + if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil { + t.Fatal(err) + } + rand.Seed(s) + + dstdb := MustOpen(0666, nil) + dstdb.Close() + + // fill the db + db := MustOpen(0666, nil) + if err := db.Update(func(tx *bolt.Tx) error { + n := 2 + rand.Intn(5) + for i := 0; i < n; i++ { + k := []byte(fmt.Sprintf("b%d", i)) + b, err := tx.CreateBucketIfNotExists(k) + if err != nil { + return err + } + if err := b.SetSequence(uint64(i)); err != nil { + return err + } + if err := fillBucket(b, append(k, '.')); err != nil { + return err + } + } + return nil + }); err != nil { + db.Close() + t.Fatal(err) + } + + // make the db grow by adding large values, and delete them. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("large_vals")) + if err != nil { + return err + } + n := 5 + rand.Intn(5) + for i := 0; i < n; i++ { + v := make([]byte, 1000*1000*(1+rand.Intn(5))) + _, err := crypto.Read(v) + if err != nil { + return err + } + if err := b.Put([]byte(fmt.Sprintf("l%d", i)), v); err != nil { + return err + } + } + return nil + }); err != nil { + db.Close() + t.Fatal(err) + } + if err := db.Update(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("large_vals")).Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + if err := c.Delete(); err != nil { + return err + } + } + return tx.DeleteBucket([]byte("large_vals")) + }); err != nil { + db.Close() + t.Fatal(err) + } + db.DB.Close() + defer db.Close() + defer dstdb.Close() + + dbChk, err := chkdb(db.Path) + if err != nil { + t.Fatal(err) + } + + m := NewMain() + if err := m.Run("compact", "-o", dstdb.Path, db.Path); err != nil { + t.Fatal(err) + } + + dbChkAfterCompact, err := chkdb(db.Path) + if err != nil { + t.Fatal(err) + } + + dstdbChk, err := chkdb(dstdb.Path) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(dbChk, dbChkAfterCompact) { + t.Error("the original db has been touched") + } + if !bytes.Equal(dbChk, dstdbChk) { + t.Error("the compacted db data isn't the same than the original db") + } +} + +func fillBucket(b *bolt.Bucket, prefix []byte) error { + n := 10 + rand.Intn(50) + for i := 0; i < n; i++ { + v := make([]byte, 10*(1+rand.Intn(4))) + _, err := crypto.Read(v) + if err != nil { + return err + } + k := append(prefix, []byte(fmt.Sprintf("k%d", i))...) + if err := b.Put(k, v); err != nil { + return err + } + } + // limit depth of subbuckets + s := 2 + rand.Intn(4) + if len(prefix) > (2*s + 1) { + return nil + } + n = 1 + rand.Intn(3) + for i := 0; i < n; i++ { + k := append(prefix, []byte(fmt.Sprintf("b%d", i))...) 
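+ // Recurse into a freshly created sub-bucket so the test data ends up nested several levels deep.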
+ sb, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := fillBucket(sb, append(k, '.')); err != nil { + return err + } + } + return nil +} + +func chkdb(path string) ([]byte, error) { + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return nil, err + } + defer db.Close() + var buf bytes.Buffer + err = db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return walkBucket(b, name, nil, &buf) + }) + }) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func walkBucket(parent *bolt.Bucket, k []byte, v []byte, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil { + return err + } + + // not a bucket, exit. + if v != nil { + return nil + } + return parent.ForEach(func(k, v []byte) error { + if v == nil { + return walkBucket(parent.Bucket(k), k, nil, w) + } + return walkBucket(parent, k, v, w) + }) +} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go new file mode 100644 index 00000000000..1be9f35e3ef --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,400 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. 
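+// Subsequent calls at the end of the bucket keep returning nil rather than wrapping around.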
+// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. 
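+ // A child reference may live in an in-memory node or in the raw page, so the node is preferred when present.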
+ var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. 
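+ // When no exact match is found, the index is stepped back below so the search descends into the child that covers the key.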
+ ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/boltdb/bolt/cursor_test.go b/vendor/github.com/boltdb/bolt/cursor_test.go new file mode 100644 index 00000000000..562d60f9af3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor_test.go @@ -0,0 +1,817 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "fmt" + "log" + "os" + "reflect" + "sort" + "testing" + "testing/quick" + + "github.com/boltdb/bolt" +) + +// Ensure that a cursor can return a reference to the bucket that created it. +func TestCursor_Bucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) { + t.Fatal("cursor bucket mismatch") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can seek to the appropriate keys. 
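+// Missing keys should seek to the next key in order, and bucket entries should come back with a nil value.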
+func TestCursor_Seek(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0001")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0002")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0003")); err != nil { + t.Fatal(err) + } + + if _, err := b.CreateBucket([]byte("bkt")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + + // Exact match should go to the key. + if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } + + // Inexact match should go to the next key. + if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0003")) { + t.Fatalf("unexpected value: %v", v) + } + + // Low key should go to the first key. + if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } + + // High key should return no key. + if k, v := c.Seek([]byte("zzz")); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + // Buckets should return their key but no value. + if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestCursor_Delete(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + const count = 1000 + + // Insert every other key between 0 and $count. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < count; i += 1 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(i)) + if err := b.Put(k, make([]byte, 100)); err != nil { + t.Fatal(err) + } + } + if _, err := b.CreateBucket([]byte("sub")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + bound := make([]byte, 8) + binary.BigEndian.PutUint64(bound, uint64(count/2)) + for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { + if err := c.Delete(); err != nil { + t.Fatal(err) + } + } + + c.Seek([]byte("sub")) + if err := c.Delete(); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("widgets")).Stats() + if stats.KeyN != count/2+1 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can seek to the appropriate keys when there are a +// large number of keys. This test also checks that seek will always move +// forward to the next key. 
+// +// Related: https://github.com/boltdb/bolt/pull/187 +func TestCursor_Seek_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var count = 10000 + + // Insert every other key between 0 and $count. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < count; i += 100 { + for j := i; j < i+100; j += 2 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(j)) + if err := b.Put(k, make([]byte, 100)); err != nil { + t.Fatal(err) + } + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + for i := 0; i < count; i++ { + seek := make([]byte, 8) + binary.BigEndian.PutUint64(seek, uint64(i)) + + k, _ := c.Seek(seek) + + // The last seek is beyond the end of the the range so + // it should return nil. + if i == count-1 { + if k != nil { + t.Fatal("expected nil key") + } + continue + } + + // Otherwise we should seek to the exact key or the next key. + num := binary.BigEndian.Uint64(k) + if i%2 == 0 { + if num != uint64(i) { + t.Fatalf("unexpected num: %d", num) + } + } else { + if num != uint64(i+1) { + t.Fatalf("unexpected num: %d", num) + } + } + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a cursor can iterate over an empty bucket without error. +func TestCursor_EmptyBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.First() + if k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. +func TestCursor_EmptyBucketReverse(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.Last() + if k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can iterate over a single root with a couple elements. 
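+// All three keys fit on the bucket's root leaf, and calling Next past the last key must keep returning nil.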
+func TestCursor_Iterate_Leaf(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{0}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{1}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + defer func() { _ = tx.Rollback() }() + + c := tx.Bucket([]byte("widgets")).Cursor() + + k, v := c.First() + if !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{1}) { + t.Fatalf("unexpected value: %v", v) + } + + k, v = c.Next() + if !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{}) { + t.Fatalf("unexpected value: %v", v) + } + + k, v = c.Next() + if !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{0}) { + t.Fatalf("unexpected value: %v", v) + } + + k, v = c.Next() + if k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + k, v = c.Next() + if k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. +func TestCursor_LeafRootReverse(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{0}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{1}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + + if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{0}) { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{}) { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{1}) { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Prev(); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + if k, v := c.Prev(); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can restart from the beginning. 
+func TestCursor_Restart(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + + if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } + if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } + + if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } + if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a cursor can skip over empty pages that have been deleted. +func TestCursor_First_EmptyPages(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create 1000 keys in the "widgets" bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1000; i++ { + if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil { + t.Fatal(err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + + // Delete half the keys and then try to iterate. + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < 600; i++ { + if err := b.Delete(u64tob(uint64(i))); err != nil { + t.Fatal(err) + } + } + + c := b.Cursor() + var n int + for k, _ := c.First(); k != nil; k, _ = c.Next() { + n++ + } + if n != 400 { + t.Fatalf("unexpected key count: %d", n) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx can iterate over all elements in a bucket. +func TestCursor_QuickCheck(t *testing.T) { + f := func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Sort test data. + sort.Sort(items) + + // Iterate over all items and check consistency. + var index = 0 + tx, err = db.Begin(false) + if err != nil { + t.Fatal(err) + } + + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { + if !bytes.Equal(k, items[index].Key) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, items[index].Value) { + t.Fatalf("unexpected value: %v", v) + } + index++ + } + if len(items) != index { + t.Fatalf("unexpected item count: %v, expected %v", len(items), index) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can iterate over all elements in a bucket in reverse. +func TestCursor_QuickCheck_Reverse(t *testing.T) { + f := func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. 
+ tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Sort test data. + sort.Sort(revtestdata(items)) + + // Iterate over all items and check consistency. + var index = 0 + tx, err = db.Begin(false) + if err != nil { + t.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { + if !bytes.Equal(k, items[index].Key) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, items[index].Value) { + t.Fatalf("unexpected value: %v", v) + } + index++ + } + if len(items) != index { + t.Fatalf("unexpected item count: %v, expected %v", len(items), index) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a Tx cursor can iterate over subbuckets. +func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("baz")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + names = append(names, string(k)) + if v != nil { + t.Fatalf("unexpected value: %v", v) + } + } + if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) { + t.Fatalf("unexpected names: %+v", names) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can reverse iterate over subbuckets. +func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("baz")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil; k, v = c.Prev() { + names = append(names, string(k)) + if v != nil { + t.Fatalf("unexpected value: %v", v) + } + } + if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) { + t.Fatalf("unexpected names: %+v", names) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func ExampleCursor() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a read-write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a new bucket. + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } + + // Insert data into a bucket. 
+ if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + log.Fatal(err) + } + + // Create a cursor for iteration. + c := b.Cursor() + + // Iterate over items in sorted key order. This starts from the + // first key/value pair and updates the k/v variables to the + // next key/value on each iteration. + // + // The loop finishes at the end of the cursor when a nil key is returned. + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("A %s is %s.\n", k, v) + } + + return nil + }); err != nil { + log.Fatal(err) + } + + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // A cat is lame. + // A dog is fun. + // A liger is awesome. +} + +func ExampleCursor_reverse() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a read-write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a new bucket. + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } + + // Insert data into a bucket. + if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + log.Fatal(err) + } + + // Create a cursor for iteration. + c := b.Cursor() + + // Iterate over items in reverse sorted key order. This starts + // from the last key/value pair and updates the k/v variables to + // the previous key/value on each iteration. + // + // The loop finishes at the beginning of the cursor when a nil key + // is returned. + for k, v := c.Last(); k != nil; k, v = c.Prev() { + fmt.Printf("A %s is %s.\n", k, v) + } + + return nil + }); err != nil { + log.Fatal(err) + } + + // Close the database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // A liger is awesome. + // A dog is fun. + // A cat is lame. +} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go new file mode 100644 index 00000000000..cc3596fa7bf --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db.go @@ -0,0 +1,1037 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// DB represents a collection of buckets persisted to a file on disk. 
+// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. 
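+// Open blocks while another read-write process holds the file lock, or returns a timeout error when Options.Timeout is set.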
+// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { + db.pageSize = int(m.pageSize) + } + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) + + // Mark the database as opened and return. + return db, nil +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. 
+ if db.rwtx != nil {
+ db.rwtx.root.dereference()
+ }
+
+ // Unmap existing data before continuing.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Memory-map the data file as a byte slice.
+ if err := mmap(db, size); err != nil {
+ return err
+ }
+
+ // Save references to the meta pages.
+ db.meta0 = db.page(0).meta()
+ db.meta1 = db.page(1).meta()
+
+ // Validate the meta pages. We only return an error if both meta pages fail
+ // validation, since meta0 failing validation means that it wasn't saved
+ // properly -- but we can recover using meta1. And vice-versa.
+ err0 := db.meta0.validate()
+ err1 := db.meta1.validate()
+ if err0 != nil && err1 != nil {
+ return err0
+ }
+
+ return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+ if err := munmap(db); err != nil {
+ return fmt.Errorf("unmap error: " + err.Error())
+ }
+ return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+ // Double the size from 32KB until 1GB.
+ for i := uint(15); i <= 30; i++ {
+ if size <= 1<<i {
+ return 1 << i, nil
+ }
+ }
+
+ // Verify the requested size is not above the maximum allowed.
+ if size > maxMapSize {
+ return 0, fmt.Errorf("mmap too large")
+ }
+
+ // If larger than 1GB then grow by 1GB at a time.
+ sz := int64(size)
+ if remainder := sz % int64(maxMmapStep); remainder > 0 {
+ sz += int64(maxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+ // This should always be true since we're incrementing in MBs.
+ pageSize := int64(db.pageSize)
+ if (sz % pageSize) != 0 {
+ sz = ((sz / pageSize) + 1) * pageSize
+ }
+
+ // If we've exceeded the max size then only grow up to the max size.
+ if sz > maxMapSize {
+ sz = maxMapSize
+ }
+
+ return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+ // Set the page size to the OS page size.
+ db.pageSize = os.Getpagesize()
+
+ // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4)
+ for i := 0; i < 2; i++ {
+ p := db.pageInBuffer(buf[:], pgid(i))
+ p.id = pgid(i)
+ p.flags = metaPageFlag
+
+ // Initialize the meta page.
+ m := p.meta()
+ m.magic = magic
+ m.version = version
+ m.pageSize = uint32(db.pageSize)
+ m.freelist = 2
+ m.root = bucket{root: 3}
+ m.pgid = 4
+ m.txid = txid(i)
+ m.checksum = m.sum64()
+ }
+
+ // Write an empty freelist at page 3.
+ p := db.pageInBuffer(buf[:], pgid(2))
+ p.id = pgid(2)
+ p.flags = freelistPageFlag
+ p.count = 0
+
+ // Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf[:], pgid(3))
+ p.id = pgid(3)
+ p.flags = leafPageFlag
+ p.count = 0
+
+ // Write the buffer to our data file.
+ if _, err := db.ops.writeAt(buf, 0); err != nil {
+ return err
+ }
+ if err := fdatasync(db); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close releases all database resources.
+// All transactions must be closed before closing the database.
+func (db *DB) Close() error {
+ db.rwlock.Lock()
+ defer db.rwlock.Unlock()
+
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ db.mmaplock.RLock()
+ defer db.mmaplock.RUnlock()
+
+ return db.close()
+}
+
+func (db *DB) close() error {
+ if !db.opened {
+ return nil
+ }
+
+ db.opened = false
+
+ db.freelist = nil
+
+ // Clear ops.
+ db.ops.writeAt = nil
+
+ // Close the mmap.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Close file handles.
+ if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + db.path = "" + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. 
+ db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. 
+ db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + c.err <- err + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. 
+func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. 
+ InitialMmapSize int +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) 
} + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/boltdb/bolt/db_test.go b/vendor/github.com/boltdb/bolt/db_test.go new file mode 100644 index 00000000000..3034d4f476e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db_test.go @@ -0,0 +1,1545 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "errors" + "flag" + "fmt" + "hash/fnv" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "testing" + "time" + "unsafe" + + "github.com/boltdb/bolt" +) + +var statsFlag = flag.Bool("stats", false, "show performance stats") + +// version is the data file format version. +const version = 2 + +// magic is the marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// pageSize is the size of one page in the data file. +const pageSize = 4096 + +// pageHeaderSize is the size of a page header. +const pageHeaderSize = 16 + +// meta represents a simplified version of a database meta page for testing. +type meta struct { + magic uint32 + version uint32 + _ uint32 + _ uint32 + _ [16]byte + _ uint64 + pgid uint64 + _ uint64 + checksum uint64 +} + +// Ensure that a database can be opened without error. +func TestOpen(t *testing.T) { + path := tempfile() + db, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } else if db == nil { + t.Fatal("expected db") + } + + if s := db.Path(); s != path { + t.Fatalf("unexpected path: %s", s) + } + + if err := db.Close(); err != nil { + t.Fatal(err) + } +} + +// Ensure that opening a database with a blank path returns an error. +func TestOpen_ErrPathRequired(t *testing.T) { + _, err := bolt.Open("", 0666, nil) + if err == nil { + t.Fatalf("expected error") + } +} + +// Ensure that opening a database with a bad path returns an error. +func TestOpen_ErrNotExists(t *testing.T) { + _, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0666, nil) + if err == nil { + t.Fatal("expected error") + } +} + +// Ensure that opening a file that is not a Bolt database returns ErrInvalid. +func TestOpen_ErrInvalid(t *testing.T) { + path := tempfile() + + f, err := os.Create(path) + if err != nil { + t.Fatal(err) + } + if _, err := fmt.Fprintln(f, "this is not a bolt database"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + defer os.Remove(path) + + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrInvalid { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that opening a file with two invalid versions returns ErrVersionMismatch. +func TestOpen_ErrVersionMismatch(t *testing.T) { + if pageSize != os.Getpagesize() { + t.Skip("page size mismatch") + } + + // Create empty database. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + // Close database. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + // Read data file. + buf, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + // Rewrite meta pages. + meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize])) + meta0.version++ + meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize])) + meta1.version++ + if err := ioutil.WriteFile(path, buf, 0666); err != nil { + t.Fatal(err) + } + + // Reopen data file. + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrVersionMismatch { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that opening a file with two invalid checksums returns ErrChecksum. 
+func TestOpen_ErrChecksum(t *testing.T) { + if pageSize != os.Getpagesize() { + t.Skip("page size mismatch") + } + + // Create empty database. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + // Close database. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + // Read data file. + buf, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + // Rewrite meta pages. + meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize])) + meta0.pgid++ + meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize])) + meta1.pgid++ + if err := ioutil.WriteFile(path, buf, 0666); err != nil { + t.Fatal(err) + } + + // Reopen data file. + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrChecksum { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that opening a database does not increase its size. +// https://github.com/boltdb/bolt/issues/291 +func TestOpen_Size(t *testing.T) { + // Open a data file. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + pagesize := db.Info().PageSize + + // Insert until we get above the minimum 4MB size. + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("data")) + for i := 0; i < 10000; i++ { + if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Close database and grab the size. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + sz := fileSize(path) + if sz == 0 { + t.Fatalf("unexpected new file size: %d", sz) + } + + // Reopen database, update, and check size again. + db0, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db0.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db0.Close(); err != nil { + t.Fatal(err) + } + newSz := fileSize(path) + if newSz == 0 { + t.Fatalf("unexpected new file size: %d", newSz) + } + + // Compare the original size with the new size. + // db size might increase by a few page sizes due to the new small update. + if sz < newSz-5*int64(pagesize) { + t.Fatalf("unexpected file growth: %d => %d", sz, newSz) + } +} + +// Ensure that opening a database beyond the max step size does not increase its size. +// https://github.com/boltdb/bolt/issues/303 +func TestOpen_Size_Large(t *testing.T) { + if testing.Short() { + t.Skip("short mode") + } + + // Open a data file. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + pagesize := db.Info().PageSize + + // Insert until we get above the minimum 4MB size. + var index uint64 + for i := 0; i < 10000; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("data")) + for j := 0; j < 1000; j++ { + if err := b.Put(u64tob(index), make([]byte, 50)); err != nil { + t.Fatal(err) + } + index++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + // Close database and grab the size. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + sz := fileSize(path) + if sz == 0 { + t.Fatalf("unexpected new file size: %d", sz) + } else if sz < (1 << 30) { + t.Fatalf("expected larger initial size: %d", sz) + } + + // Reopen database, update, and check size again. 
+ db0, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db0.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) + }); err != nil { + t.Fatal(err) + } + if err := db0.Close(); err != nil { + t.Fatal(err) + } + + newSz := fileSize(path) + if newSz == 0 { + t.Fatalf("unexpected new file size: %d", newSz) + } + + // Compare the original size with the new size. + // db size might increase by a few page sizes due to the new small update. + if sz < newSz-5*int64(pagesize) { + t.Fatalf("unexpected file growth: %d => %d", sz, newSz) + } +} + +// Ensure that a re-opened database is consistent. +func TestOpen_Check(t *testing.T) { + path := tempfile() + + db, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } + + db, err = bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } +} + +// Ensure that write errors to the meta file handler during initialization are returned. +func TestOpen_MetaInitWriteError(t *testing.T) { + t.Skip("pending") +} + +// Ensure that a database that is too small returns an error. +func TestOpen_FileTooSmall(t *testing.T) { + path := tempfile() + + db, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } + + // corrupt the database + if err := os.Truncate(path, int64(os.Getpagesize())); err != nil { + t.Fatal(err) + } + + db, err = bolt.Open(path, 0666, nil) + if err == nil || err.Error() != "file size too small" { + t.Fatalf("unexpected error: %s", err) + } +} + +// TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough +// to hold data from concurrent write transaction resolves the issue that +// read transaction blocks the write transaction and causes deadlock. +// This is a very hacky test since the mmap size is not exposed. +func TestDB_Open_InitialMmapSize(t *testing.T) { + path := tempfile() + defer os.Remove(path) + + initMmapSize := 1 << 31 // 2GB + testWriteSize := 1 << 27 // 134MB + + db, err := bolt.Open(path, 0666, &bolt.Options{InitialMmapSize: initMmapSize}) + if err != nil { + t.Fatal(err) + } + + // create a long-running read transaction + // that never gets closed while writing + rtx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + + // create a write transaction + wtx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := wtx.CreateBucket([]byte("test")) + if err != nil { + t.Fatal(err) + } + + // and commit a large write + err = b.Put([]byte("foo"), make([]byte, testWriteSize)) + if err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + + go func() { + if err := wtx.Commit(); err != nil { + t.Fatal(err) + } + done <- struct{}{} + }() + + select { + case <-time.After(5 * time.Second): + t.Errorf("unexpected that the reader blocks writer") + case <-done: + } + + if err := rtx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a database cannot open a transaction when it's not open. 
+func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { + var db bolt.DB + if _, err := db.Begin(false); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that a read-write transaction can be retrieved. +func TestDB_BeginRW(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } else if tx == nil { + t.Fatal("expected tx") + } + + if tx.DB() != db.DB { + t.Fatal("unexpected tx database") + } else if !tx.Writable() { + t.Fatal("expected writable tx") + } + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } +} + +// Ensure that opening a transaction while the DB is closed returns an error. +func TestDB_BeginRW_Closed(t *testing.T) { + var db bolt.DB + if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } +func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } + +// Ensure that a database cannot close while transactions are open. +func testDB_Close_PendingTx(t *testing.T, writable bool) { + db := MustOpenDB() + defer db.MustClose() + + // Start transaction. + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + // Open update in separate goroutine. + done := make(chan struct{}) + go func() { + if err := db.Close(); err != nil { + t.Fatal(err) + } + close(done) + }() + + // Ensure database hasn't closed. + time.Sleep(100 * time.Millisecond) + select { + case <-done: + t.Fatal("database closed too early") + default: + } + + // Commit transaction. + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Ensure database closed now. + time.Sleep(100 * time.Millisecond) + select { + case <-done: + default: + t.Fatal("database did not close") + } +} + +// Ensure a database can provide a transactional block. +func TestDB_Update(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("expected nil value, got: %v", v) + } + if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a closed database returns an error while running a transaction block +func TestDB_Update_Closed(t *testing.T) { + var db bolt.DB + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure a panic occurs while trying to commit a managed transaction. 
+func TestDB_Update_ManualCommit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_Update_ManualRollback(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a panic occurs while trying to commit a managed transaction. +func TestDB_View_ManualCommit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.View(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_View_ManualRollback(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.View(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a write transaction that panics does not hold open locks. +func TestDB_Update_Panic(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Panic during update but recover. + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: update", r) + } + }() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + panic("omg") + }); err != nil { + t.Fatal(err) + } + }() + + // Verify we can update again. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify that our change persisted. + if err := db.Update(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a database can return an error through a read-only transactional block. +func TestDB_View_Error(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.View(func(tx *bolt.Tx) error { + return errors.New("xxx") + }); err == nil || err.Error() != "xxx" { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure a read transaction that panics does not hold open locks. 
+func TestDB_View_Panic(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Panic during view transaction but recover. + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: view", r) + } + }() + + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + panic("omg") + }); err != nil { + t.Fatal(err) + } + }() + + // Verify that we can still use read transactions. + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that DB stats can be returned. +func TestDB_Stats(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + stats := db.Stats() + if stats.TxStats.PageCount != 2 { + t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount) + } else if stats.FreePageN != 0 { + t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN) + } else if stats.PendingPageN != 2 { + t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN) + } +} + +// Ensure that database pages are in expected order and type. +func TestDB_Consistency(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + if err := db.Update(func(tx *bolt.Tx) error { + if p, _ := tx.Page(0); p == nil { + t.Fatal("expected page") + } else if p.Type != "meta" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(1); p == nil { + t.Fatal("expected page") + } else if p.Type != "meta" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(2); p == nil { + t.Fatal("expected page") + } else if p.Type != "free" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(3); p == nil { + t.Fatal("expected page") + } else if p.Type != "free" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(4); p == nil { + t.Fatal("expected page") + } else if p.Type != "leaf" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(5); p == nil { + t.Fatal("expected page") + } else if p.Type != "freelist" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(6); p != nil { + t.Fatal("unexpected page") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that DB stats can be subtracted from one another. 
+func TestDBStats_Sub(t *testing.T) { + var a, b bolt.Stats + a.TxStats.PageCount = 3 + a.FreePageN = 4 + b.TxStats.PageCount = 10 + b.FreePageN = 14 + diff := b.Sub(&a) + if diff.TxStats.PageCount != 7 { + t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount) + } + + // free page stats are copied from the receiver and not subtracted + if diff.FreePageN != 14 { + t.Fatalf("unexpected FreePageN: %d", diff.FreePageN) + } +} + +// Ensure two functions can perform updates in a single batch. +func TestDB_Batch(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Iterate over multiple updates in separate goroutines. + n := 2 + ch := make(chan error) + for i := 0; i < n; i++ { + go func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + }(i) + } + + // Check all responses to make sure there's no error. + for i := 0; i < n; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < n; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestDB_Batch_Panic(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var sentinel int + var bork = &sentinel + var problem interface{} + var err error + + // Execute a function inside a batch that panics. + func() { + defer func() { + if p := recover(); p != nil { + problem = p + } + }() + err = db.Batch(func(tx *bolt.Tx) error { + panic(bork) + }) + }() + + // Verify there is no error. + if g, e := err, error(nil); g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } + // Verify the panic was captured. + if g, e := problem, bork; g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } +} + +func TestDB_BatchFull(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + const size = 3 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = size + // high enough to never trigger here + db.MaxBatchDelay = 1 * time.Hour + + go put(1) + go put(2) + + // Give the batch a chance to exhibit bugs. + time.Sleep(10 * time.Millisecond) + + // not triggered yet + select { + case <-ch: + t.Fatalf("batch triggered too early") + default: + } + + go put(3) + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. 
+ if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestDB_BatchTime(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + const size = 1 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = 1000 + db.MaxBatchDelay = 0 + + go put(1) + + // Batch must trigger by time alone. + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func ExampleDB_Update() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Execute several commands within a read-write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Read the value back from a separate read-only transaction. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value of 'foo' is: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value of 'foo' is: bar +} + +func ExampleDB_View() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Insert data into a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("people")) + if err != nil { + return err + } + if err := b.Put([]byte("john"), []byte("doe")); err != nil { + return err + } + if err := b.Put([]byte("susy"), []byte("que")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Access data from within a read-only transactional block. + if err := db.View(func(tx *bolt.Tx) error { + v := tx.Bucket([]byte("people")).Get([]byte("john")) + fmt.Printf("John's last name is %s.\n", v) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // John's last name is doe. +} + +func ExampleDB_Begin_ReadOnly() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Create a bucket using a read-write transaction. 
+ if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + log.Fatal(err) + } + + // Create several keys in a transaction. + tx, err := db.Begin(true) + if err != nil { + log.Fatal(err) + } + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("john"), []byte("blue")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("abby"), []byte("red")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil { + log.Fatal(err) + } + if err := tx.Commit(); err != nil { + log.Fatal(err) + } + + // Iterate over the values in sorted key order. + tx, err = db.Begin(false) + if err != nil { + log.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("%s likes %s\n", k, v) + } + + if err := tx.Rollback(); err != nil { + log.Fatal(err) + } + + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // abby likes red + // john likes blue + // zephyr likes purple +} + +func BenchmarkDBBatchAutomatic(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, id) + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Batch(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchSingle(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, id) + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Update(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchManual10x100(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for major := 0; major < 10; major++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + insert100 := func(tx *bolt.Tx) error { + h := fnv.New32a() + buf := make([]byte, 4) + for minor := uint32(0); minor < 100; minor++ { + binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) + h.Reset() + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + b := tx.Bucket([]byte("bench")) + if err := 
b.Put(k, []byte("filler")); err != nil { + return err + } + } + return nil + } + if err := db.Update(insert100); err != nil { + b.Fatal(err) + } + }(uint32(major)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func validateBatchBench(b *testing.B, db *DB) { + var rollback = errors.New("sentinel error to cause rollback") + validate := func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte("bench")) + h := fnv.New32a() + buf := make([]byte, 4) + for id := uint32(0); id < 1000; id++ { + binary.LittleEndian.PutUint32(buf, id) + h.Reset() + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + v := bucket.Get(k) + if v == nil { + b.Errorf("not found id=%d key=%x", id, k) + continue + } + if g, e := v, []byte("filler"); !bytes.Equal(g, e) { + b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) + } + if err := bucket.Delete(k); err != nil { + return err + } + } + // should be empty now + c := bucket.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + b.Errorf("unexpected key: %x = %q", k, v) + } + return rollback + } + if err := db.Update(validate); err != nil && err != rollback { + b.Error(err) + } +} + +// DB is a test wrapper for bolt.DB. +type DB struct { + *bolt.DB +} + +// MustOpenDB returns a new, open DB at a temporary location. +func MustOpenDB() *DB { + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + panic(err) + } + return &DB{db} +} + +// Close closes the database and deletes the underlying file. +func (db *DB) Close() error { + // Log statistics. + if *statsFlag { + db.PrintStats() + } + + // Check database consistency after every test. + db.MustCheck() + + // Close database and remove file. + defer os.Remove(db.Path()) + return db.DB.Close() +} + +// MustClose closes the database and deletes the underlying file. Panic on error. +func (db *DB) MustClose() { + if err := db.Close(); err != nil { + panic(err) + } +} + +// PrintStats prints the database stats +func (db *DB) PrintStats() { + var stats = db.Stats() + fmt.Printf("[db] %-20s %-20s %-20s\n", + fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), + fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), + fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), + ) + fmt.Printf(" %-20s %-20s %-20s\n", + fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), + fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), + fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), + ) +} + +// MustCheck runs a consistency check on the database and panics if any errors are found. +func (db *DB) MustCheck() { + if err := db.Update(func(tx *bolt.Tx) error { + // Collect all the errors. + var errors []error + for err := range tx.Check() { + errors = append(errors, err) + if len(errors) > 10 { + break + } + } + + // If errors occurred, copy the DB and print the errors. + if len(errors) > 0 { + var path = tempfile() + if err := tx.CopyFile(path, 0600); err != nil { + panic(err) + } + + // Print errors. + fmt.Print("\n\n") + fmt.Printf("consistency check failed (%d errors)\n", len(errors)) + for _, err := range errors { + fmt.Println(err) + } + fmt.Println("") + fmt.Println("db saved to:") + fmt.Println(path) + fmt.Print("\n\n") + os.Exit(-1) + } + + return nil + }); err != nil && err != bolt.ErrDatabaseNotOpen { + panic(err) + } +} + +// CopyTempFile copies a database to a temporary file. 
+func (db *DB) CopyTempFile() { + path := tempfile() + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(path, 0600) + }); err != nil { + panic(err) + } + fmt.Println("db copied to: ", path) +} + +// tempfile returns a temporary file path. +func tempfile() string { + f, err := ioutil.TempFile("", "bolt-") + if err != nil { + panic(err) + } + if err := f.Close(); err != nil { + panic(err) + } + if err := os.Remove(f.Name()); err != nil { + panic(err) + } + return f.Name() +} + +// mustContainKeys checks that a bucket contains a given set of keys. +func mustContainKeys(b *bolt.Bucket, m map[string]string) { + found := make(map[string]string) + if err := b.ForEach(func(k, _ []byte) error { + found[string(k)] = "" + return nil + }); err != nil { + panic(err) + } + + // Check for keys found in bucket that shouldn't be there. + var keys []string + for k, _ := range found { + if _, ok := m[string(k)]; !ok { + keys = append(keys, k) + } + } + if len(keys) > 0 { + sort.Strings(keys) + panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) + } + + // Check for keys not found in bucket that should be there. + for k, _ := range m { + if _, ok := found[string(k)]; !ok { + keys = append(keys, k) + } + } + if len(keys) > 0 { + sort.Strings(keys) + panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) + } +} + +func trunc(b []byte, length int) []byte { + if length < len(b) { + return b[:length] + } + return b +} + +func truncDuration(d time.Duration) string { + return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") +} + +func fileSize(path string) int64 { + fi, err := os.Stat(path) + if err != nil { + return 0 + } + return fi.Size() +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +// u64tob converts a uint64 into an 8-byte slice. +func u64tob(v uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + return b +} + +// btou64 converts an 8-byte slice into an uint64. +func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go new file mode 100644 index 00000000000..cc937845dba --- /dev/null +++ b/vendor/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. 
Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go new file mode 100644 index 00000000000..a3620a3ebb2 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/errors.go @@ -0,0 +1,71 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. 
+	ErrValueTooLarge = errors.New("value too large")
+
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+	// on an existing non-bucket key or when trying to create or delete a
+	// non-bucket key on an existing bucket key.
+	ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
new file mode 100644
index 00000000000..aba48f58c62
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/freelist.go
@@ -0,0 +1,252 @@
+package bolt
+
+import (
+	"fmt"
+	"sort"
+	"unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+	ids     []pgid          // all free and available free page ids.
+	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+	return &freelist{
+		pending: make(map[txid][]pgid),
+		cache:   make(map[pgid]bool),
+	}
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+	n := f.count()
+	if n >= 0xFFFF {
+		// The first element will be used to store the count. See freelist.write.
+		n++
+	}
+	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns count of pages on the freelist
+func (f *freelist) count() int {
+	return f.free_count() + f.pending_count()
+}
+
+// free_count returns count of free pages
+func (f *freelist) free_count() int {
+	return len(f.ids)
+}
+
+// pending_count returns count of pending pages
+func (f *freelist) pending_count() int {
+	var count int
+	for _, list := range f.pending {
+		count += len(list)
+	}
+	return count
+}
+
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+	m := make(pgids, 0, f.pending_count())
+	for _, list := range f.pending {
+		m = append(m, list...)
+	}
+	sort.Sort(m)
+	mergepgids(dst, f.ids, m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) allocate(n int) pgid {
+	if len(f.ids) == 0 {
+		return 0
+	}
+
+	var initial, previd pgid
+	for i, id := range f.ids {
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
+
+		// Reset initial page if this is not contiguous.
+		if previd == 0 || id-previd != 1 {
+			initial = id
+		}
+
+		// If we found a contiguous block then remove it and return it.
+		if (id-initial)+1 == pgid(n) {
+			// If we're allocating off the beginning then take the fast path
+			// and just adjust the existing slice. This will use extra memory
+			// temporarily but the append() in free() will realloc the slice
+			// as is necessary.
+			if (i + 1) == n {
+				f.ids = f.ids[i+1:]
+			} else {
+				copy(f.ids[i-n+1:], f.ids[i+1:])
+				f.ids = f.ids[:len(f.ids)-n]
+			}
+
+			// Remove from the free cache.
+			for i := pgid(0); i < pgid(n); i++ {
+				delete(f.cache, initial+i)
+			}
+
+			return initial
+		}
+
+		previd = id
+	}
+	return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + } + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. 
+ var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool, len(f.ids)) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/freelist_test.go b/vendor/github.com/boltdb/bolt/freelist_test.go new file mode 100644 index 00000000000..4e9b3a8dbfa --- /dev/null +++ b/vendor/github.com/boltdb/bolt/freelist_test.go @@ -0,0 +1,158 @@ +package bolt + +import ( + "math/rand" + "reflect" + "sort" + "testing" + "unsafe" +) + +// Ensure that a page is added to a transaction's freelist. +func TestFreelist_free(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12}) + if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) + } +} + +// Ensure that a page and its overflow is added to a transaction's freelist. +func TestFreelist_free_overflow(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 3}) + if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) + } +} + +// Ensure that a transaction's free pages can be released. +func TestFreelist_release(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 1}) + f.free(100, &page{id: 9}) + f.free(102, &page{id: 39}) + f.release(100) + f.release(101) + if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + f.release(102) + if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can find contiguous blocks of pages. +func TestFreelist_allocate(t *testing.T) { + f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} + if id := int(f.allocate(3)); id != 3 { + t.Fatalf("exp=3; got=%v", id) + } + if id := int(f.allocate(1)); id != 6 { + t.Fatalf("exp=6; got=%v", id) + } + if id := int(f.allocate(3)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(2)); id != 12 { + t.Fatalf("exp=12; got=%v", id) + } + if id := int(f.allocate(1)); id != 7 { + t.Fatalf("exp=7; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + if id := int(f.allocate(1)); id != 9 { + t.Fatalf("exp=9; got=%v", id) + } + if id := int(f.allocate(1)); id != 18 { + t.Fatalf("exp=18; got=%v", id) + } + if id := int(f.allocate(1)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can deserialize from a freelist page. +func TestFreelist_read(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = freelistPageFlag + page.count = 2 + + // Insert 2 page ids. + ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) + ids[0] = 23 + ids[1] = 50 + + // Deserialize page into a freelist. 
+ f := newFreelist() + f.read(page) + + // Ensure that there are two page ids in the freelist. + if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can serialize into a freelist page. +func TestFreelist_write(t *testing.T) { + // Create a freelist and write it to a page. + var buf [4096]byte + f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} + f.pending[100] = []pgid{28, 11} + f.pending[101] = []pgid{3} + p := (*page)(unsafe.Pointer(&buf[0])) + if err := f.write(p); err != nil { + t.Fatal(err) + } + + // Read the page back out. + f2 := newFreelist() + f2.read(p) + + // Ensure that the freelist is correct. + // All pages should be present and in reverse order. + if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { + t.Fatalf("exp=%v; got=%v", exp, f2.ids) + } +} + +func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } +func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } +func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } +func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } + +func benchmark_FreelistRelease(b *testing.B, size int) { + ids := randomPgids(size) + pending := randomPgids(len(ids) / 400) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} + f.release(1) + } +} + +func randomPgids(n int) []pgid { + rand.Seed(42) + pgids := make(pgids, n) + for i := range pgids { + pgids[i] = pgid(rand.Int63()) + } + sort.Sort(pgids) + return pgids +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 00000000000..159318b229c --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,604 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. 
+func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. 
+ if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. 
+// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. 
+ child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. 
+ for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/node_test.go b/vendor/github.com/boltdb/bolt/node_test.go new file mode 100644 index 00000000000..fa5d10f9990 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node_test.go @@ -0,0 +1,156 @@ +package bolt + +import ( + "testing" + "unsafe" +) + +// Ensure that a node can insert a key/value. +func TestNode_put(t *testing.T) { + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} + n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) + n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) + n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) + n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) + + if len(n.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if n.inodes[2].flags != uint32(leafPageFlag) { + t.Fatalf("not a leaf: %d", n.inodes[2].flags) + } +} + +// Ensure that a node can deserialize from a leaf page. +func TestNode_read_LeafPage(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = leafPageFlag + page.count = 2 + + // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 + nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) + nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 + nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 + + // Write data for the nodes at the end. + data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) + copy(data[:], []byte("barfooz")) + copy(data[7:], []byte("helloworldbye")) + + // Deserialize page into a leaf. + n := &node{} + n.read(page) + + // Check that there are two inodes with correct data. + if !n.isLeaf { + t.Fatal("expected leaf") + } + if len(n.inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } +} + +// Ensure that a node can serialize into a leaf page. 
+func TestNode_write_LeafPage(t *testing.T) { + // Create a node. + n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) + n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) + n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) + + // Write it to a page. + var buf [4096]byte + p := (*page)(unsafe.Pointer(&buf[0])) + n.write(p) + + // Read the page back in. + n2 := &node{} + n2.read(p) + + // Check that the two pages are the same. + if len(n2.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n2.inodes)) + } + if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } +} + +// Ensure that a node can split into appropriate subgroups. +func TestNode_split(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) + + // Split between 2 & 3. + n.split(100) + + var parent = n.parent + if len(parent.children) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children)) + } + if len(parent.children[0].inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) + } + if len(parent.children[1].inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) + } +} + +// Ensure that a page with the minimum number of inodes just returns a single node. +func TestNode_split_MinKeys(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + + // Split. + n.split(20) + if n.parent != nil { + t.Fatalf("expected nil parent") + } +} + +// Ensure that a node that has keys that all fit on a page just returns one leaf. +func TestNode_split_SinglePage(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) + + // Split. 
+ n.split(4096) + if n.parent != nil { + t.Fatalf("expected nil parent") + } +} diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 00000000000..cde403ae86d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,197 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. 
+func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/github.com/boltdb/bolt/page_test.go b/vendor/github.com/boltdb/bolt/page_test.go new file mode 100644 index 00000000000..59f4a30ed83 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page_test.go @@ -0,0 +1,72 @@ +package bolt + +import ( + "reflect" + "sort" + "testing" + "testing/quick" +) + +// Ensure that the page type can be returned in human readable format. +func TestPage_typ(t *testing.T) { + if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { + t.Fatalf("exp=branch; got=%v", typ) + } + if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { + t.Fatalf("exp=leaf; got=%v", typ) + } + if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { + t.Fatalf("exp=meta; got=%v", typ) + } + if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { + t.Fatalf("exp=freelist; got=%v", typ) + } + if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { + t.Fatalf("exp=unknown<4e20>; got=%v", typ) + } +} + +// Ensure that the hexdump debugging function doesn't blow up. 
+func TestPage_dump(t *testing.T) { + (&page{id: 256}).hexdump(16) +} + +func TestPgids_merge(t *testing.T) { + a := pgids{4, 5, 6, 10, 11, 12, 13, 27} + b := pgids{1, 3, 8, 9, 25, 30} + c := a.merge(b) + if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { + t.Errorf("mismatch: %v", c) + } + + a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} + b = pgids{8, 9, 25, 30} + c = a.merge(b) + if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { + t.Errorf("mismatch: %v", c) + } +} + +func TestPgids_merge_quick(t *testing.T) { + if err := quick.Check(func(a, b pgids) bool { + // Sort incoming lists. + sort.Sort(a) + sort.Sort(b) + + // Merge the two lists together. + got := a.merge(b) + + // The expected value should be the two lists combined and sorted. + exp := append(a, b...) + sort.Sort(exp) + + if !reflect.DeepEqual(exp, got) { + t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) + return false + } + + return true + }, nil); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/boltdb/bolt/quick_test.go b/vendor/github.com/boltdb/bolt/quick_test.go new file mode 100644 index 00000000000..9e27792e1a0 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/quick_test.go @@ -0,0 +1,87 @@ +package bolt_test + +import ( + "bytes" + "flag" + "fmt" + "math/rand" + "os" + "reflect" + "testing/quick" + "time" +) + +// testing/quick defaults to 5 iterations and a random seed. +// You can override these settings from the command line: +// +// -quick.count The number of iterations to perform. +// -quick.seed The seed to use for randomizing. +// -quick.maxitems The maximum number of items to insert into a DB. +// -quick.maxksize The maximum size of a key. +// -quick.maxvsize The maximum size of a value. +// + +var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int + +func init() { + flag.IntVar(&qcount, "quick.count", 5, "") + flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") + flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") + flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") + flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") + flag.Parse() + fmt.Fprintln(os.Stderr, "seed:", qseed) + fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) +} + +func qconfig() *quick.Config { + return &quick.Config{ + MaxCount: qcount, + Rand: rand.New(rand.NewSource(int64(qseed))), + } +} + +type testdata []testdataitem + +func (t testdata) Len() int { return len(t) } +func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } + +func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { + n := rand.Intn(qmaxitems-1) + 1 + items := make(testdata, n) + used := make(map[string]bool) + for i := 0; i < n; i++ { + item := &items[i] + // Ensure that keys are unique by looping until we find one that we have not already used. 
+ for { + item.Key = randByteSlice(rand, 1, qmaxksize) + if !used[string(item.Key)] { + used[string(item.Key)] = true + break + } + } + item.Value = randByteSlice(rand, 0, qmaxvsize) + } + return reflect.ValueOf(items) +} + +type revtestdata []testdataitem + +func (t revtestdata) Len() int { return len(t) } +func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } + +type testdataitem struct { + Key []byte + Value []byte +} + +func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { + n := rand.Intn(maxSize-minSize) + minSize + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} diff --git a/vendor/github.com/boltdb/bolt/simulation_test.go b/vendor/github.com/boltdb/bolt/simulation_test.go new file mode 100644 index 00000000000..38310165570 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/simulation_test.go @@ -0,0 +1,329 @@ +package bolt_test + +import ( + "bytes" + "fmt" + "math/rand" + "sync" + "testing" + + "github.com/boltdb/bolt" +) + +func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) } +func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } +func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } +func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } +func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } + +func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } +func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } +func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } +func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } + +func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } +func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } +func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) } + +func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } + +// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. +func testSimulate(t *testing.T, threadCount, parallelism int) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + rand.Seed(int64(qseed)) + + // A list of operations that readers and writers can perform. + var readerHandlers = []simulateHandler{simulateGetHandler} + var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} + + var versions = make(map[int]*QuickDB) + versions[1] = NewQuickDB() + + db := MustOpenDB() + defer db.MustClose() + + var mutex sync.Mutex + + // Run n threads in parallel, each with their own operation. + var wg sync.WaitGroup + var threads = make(chan bool, parallelism) + var i int + for { + threads <- true + wg.Add(1) + writable := ((rand.Int() % 100) < 20) // 20% writers + + // Choose an operation to execute. + var handler simulateHandler + if writable { + handler = writerHandlers[rand.Intn(len(writerHandlers))] + } else { + handler = readerHandlers[rand.Intn(len(readerHandlers))] + } + + // Execute a thread for the given operation. + go func(writable bool, handler simulateHandler) { + defer wg.Done() + + // Start transaction. + tx, err := db.Begin(writable) + if err != nil { + t.Fatal("tx begin: ", err) + } + + // Obtain current state of the dataset. 
+			mutex.Lock()
+			var qdb = versions[tx.ID()]
+			if writable {
+				qdb = versions[tx.ID()-1].Copy()
+			}
+			mutex.Unlock()
+
+			// Make sure we commit/rollback the tx at the end and update the state.
+			if writable {
+				defer func() {
+					mutex.Lock()
+					versions[tx.ID()] = qdb
+					mutex.Unlock()
+
+					if err := tx.Commit(); err != nil {
+						t.Fatal(err)
+					}
+				}()
+			} else {
+				defer func() { _ = tx.Rollback() }()
+			}
+
+			// Ignore operation if we don't have data yet.
+			if qdb == nil {
+				return
+			}
+
+			// Execute handler.
+			handler(tx, qdb)
+
+			// Release a thread back to the scheduling loop.
+			<-threads
+		}(writable, handler)
+
+		i++
+		if i > threadCount {
+			break
+		}
+	}
+
+	// Wait until all threads are done.
+	wg.Wait()
+}
+
+type simulateHandler func(tx *bolt.Tx, qdb *QuickDB)
+
+// Retrieves a key from the database and verifies that it is what is expected.
+func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) {
+	// Randomly retrieve an existing key path.
+	keys := qdb.Rand()
+	if len(keys) == 0 {
+		return
+	}
+
+	// Retrieve root bucket.
+	b := tx.Bucket(keys[0])
+	if b == nil {
+		panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4)))
+	}
+
+	// Drill into nested buckets.
+	for _, key := range keys[1 : len(keys)-1] {
+		b = b.Bucket(key)
+		if b == nil {
+			panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
+		}
+	}
+
+	// Verify key/value on the final bucket.
+	expected := qdb.Get(keys)
+	actual := b.Get(keys[len(keys)-1])
+	if !bytes.Equal(actual, expected) {
+		fmt.Println("=== EXPECTED ===")
+		fmt.Println(expected)
+		fmt.Println("=== ACTUAL ===")
+		fmt.Println(actual)
+		fmt.Println("=== END ===")
+		panic("value mismatch")
+	}
+}
+
+// Inserts a key into the database.
+func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) {
+	var err error
+	keys, value := randKeys(), randValue()
+
+	// Retrieve root bucket.
+	b := tx.Bucket(keys[0])
+	if b == nil {
+		b, err = tx.CreateBucket(keys[0])
+		if err != nil {
+			panic("create bucket: " + err.Error())
+		}
+	}
+
+	// Create nested buckets, if necessary.
+	for _, key := range keys[1 : len(keys)-1] {
+		child := b.Bucket(key)
+		if child != nil {
+			b = child
+		} else {
+			b, err = b.CreateBucket(key)
+			if err != nil {
+				panic("create bucket: " + err.Error())
+			}
+		}
+	}
+
+	// Insert into database.
+	if err := b.Put(keys[len(keys)-1], value); err != nil {
+		panic("put: " + err.Error())
+	}
+
+	// Insert into in-memory database.
+	qdb.Put(keys, value)
+}
+
+// QuickDB is an in-memory database that replicates the functionality of the
+// Bolt DB type except that it is entirely in-memory. It is meant for testing
+// that the Bolt database is consistent.
+type QuickDB struct {
+	sync.RWMutex
+	m map[string]interface{}
+}
+
+// NewQuickDB returns an instance of QuickDB.
+func NewQuickDB() *QuickDB {
+	return &QuickDB{m: make(map[string]interface{})}
+}
+
+// Get retrieves the value at a key path.
+func (db *QuickDB) Get(keys [][]byte) []byte {
+	db.RLock()
+	defer db.RUnlock()
+
+	m := db.m
+	for _, key := range keys[:len(keys)-1] {
+		value := m[string(key)]
+		if value == nil {
+			return nil
+		}
+		switch value := value.(type) {
+		case map[string]interface{}:
+			m = value
+		case []byte:
+			return nil
+		}
+	}
+
+	// Only return if it's a simple value.
+	if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
+		return value
+	}
+	return nil
+}
+
+// Put inserts a value into a key path.
+func (db *QuickDB) Put(keys [][]byte, value []byte) {
+	db.Lock()
+	defer db.Unlock()
+
+	// Build buckets all the way down the key path.
+ m := db.m + for _, key := range keys[:len(keys)-1] { + if _, ok := m[string(key)].([]byte); ok { + return // Keypath intersects with a simple value. Do nothing. + } + + if m[string(key)] == nil { + m[string(key)] = make(map[string]interface{}) + } + m = m[string(key)].(map[string]interface{}) + } + + // Insert value into the last key. + m[string(keys[len(keys)-1])] = value +} + +// Rand returns a random key path that points to a simple value. +func (db *QuickDB) Rand() [][]byte { + db.RLock() + defer db.RUnlock() + if len(db.m) == 0 { + return nil + } + var keys [][]byte + db.rand(db.m, &keys) + return keys +} + +func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { + i, index := 0, rand.Intn(len(m)) + for k, v := range m { + if i == index { + *keys = append(*keys, []byte(k)) + if v, ok := v.(map[string]interface{}); ok { + db.rand(v, keys) + } + return + } + i++ + } + panic("quickdb rand: out-of-range") +} + +// Copy copies the entire database. +func (db *QuickDB) Copy() *QuickDB { + db.RLock() + defer db.RUnlock() + return &QuickDB{m: db.copy(db.m)} +} + +func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { + clone := make(map[string]interface{}, len(m)) + for k, v := range m { + switch v := v.(type) { + case map[string]interface{}: + clone[k] = db.copy(v) + default: + clone[k] = v + } + } + return clone +} + +func randKey() []byte { + var min, max = 1, 1024 + n := rand.Intn(max-min) + min + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} + +func randKeys() [][]byte { + var keys [][]byte + var count = rand.Intn(2) + 2 + for i := 0; i < count; i++ { + keys = append(keys, randKey()) + } + return keys +} + +func randValue() []byte { + n := rand.Intn(8192) + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 00000000000..bbb60ac9226 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,686 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. 
+ tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. 
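The Tx doc comment above is explicit that every transaction must end in Commit or Rollback, because open readers pin old pages and can make the file grow. db.Update and db.View manage this automatically; a sketch of the manual db.Begin pattern they wrap (the function names are ours):

package example

import "github.com/boltdb/bolt"

// putKey shows the manual writable-transaction pattern: commit on success,
// roll back on any error. The deferred Rollback simply returns ErrTxClosed
// once Commit has already run, which we ignore.
func putKey(db *bolt.DB, bucket, key, value []byte) error {
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists(bucket)
	if err != nil {
		return err
	}
	if err := b.Put(key, value); err != nil {
		return err
	}
	return tx.Commit()
}

// getKey shows the read-only pattern: such transactions must be rolled back,
// never committed, and returned values are only valid while the transaction
// is open, so we copy before returning.
func getKey(db *bolt.DB, bucket, key []byte) ([]byte, error) {
	tx, err := db.Begin(false)
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()

	b := tx.Bucket(bucket)
	if b == nil {
		return nil, nil
	}
	return append([]byte(nil), b.Get(key)...), nil
}
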
+ var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. +// +// Deprecated; Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. 
+// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. 
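CopyFile and WriteTo give callers online backups: the copy runs inside a read transaction, so writers keep going while the snapshot is written out. Two hedged sketches of how a caller might use them, one to a file and one over HTTP; the O_DIRECT remark follows the WriteFlag documentation above, and the handler's filename and file mode are placeholders:

package example

import (
	"net/http"
	"strconv"

	"github.com/boltdb/bolt"
)

// backupToFile snapshots the database to path without blocking writers.
func backupToFile(db *bolt.DB, path string) error {
	return db.View(func(tx *bolt.Tx) error {
		// For very large databases on Linux, tx.WriteFlag = syscall.O_DIRECT
		// could be set here first, per the WriteFlag documentation above.
		return tx.CopyFile(path, 0600)
	})
}

// backupHandler streams a snapshot over HTTP; Content-Length comes from tx.Size(),
// which is exact because WriteTo writes exactly that many bytes on success.
func backupHandler(db *bolt.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		err := db.View(func(tx *bolt.Tx) error {
			w.Header().Set("Content-Type", "application/octet-stream")
			w.Header().Set("Content-Disposition", `attachment; filename="backup.db"`)
			w.Header().Set("Content-Length", strconv.FormatInt(tx.Size(), 10))
			_, err := tx.WriteTo(w)
			return err
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}
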
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. 
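Check streams each inconsistency it finds over the returned channel and closes the channel when the page walk completes, which is exactly what Commit drains when StrictMode is set. Draining it by hand from a read-only transaction looks roughly like this (per the doc comment above, avoid running writers at the same time when checking on a read-only transaction):

package example

import "github.com/boltdb/bolt"

// verify collects every consistency error reported by tx.Check.
// The loop ends when check() closes the channel.
func verify(db *bolt.DB) []error {
	var errs []error
	_ = db.View(func(tx *bolt.Tx) error {
		for err := range tx.Check() {
			errs = append(errs, err)
		}
		return nil
	})
	return errs
}
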
+ tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. +func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. +type TxStats struct { + // Page statistics. + PageCount int // number of page allocations + PageAlloc int // total bytes allocated + + // Cursor statistics. + CursorCount int // number of cursors created + + // Node statistics + NodeCount int // number of node allocations + NodeDeref int // number of node dereferences + + // Rebalance statistics. + Rebalance int // number of node rebalances + RebalanceTime time.Duration // total time spent rebalancing + + // Split/Spill statistics. + Split int // number of nodes split + Spill int // number of nodes spilled + SpillTime time.Duration // total time spent spilling + + // Write statistics. + Write int // number of writes performed + WriteTime time.Duration // total time spent writing to disk +} + +func (s *TxStats) add(other *TxStats) { + s.PageCount += other.PageCount + s.PageAlloc += other.PageAlloc + s.CursorCount += other.CursorCount + s.NodeCount += other.NodeCount + s.NodeDeref += other.NodeDeref + s.Rebalance += other.Rebalance + s.RebalanceTime += other.RebalanceTime + s.Split += other.Split + s.Spill += other.Spill + s.SpillTime += other.SpillTime + s.Write += other.Write + s.WriteTime += other.WriteTime +} + +// Sub calculates and returns the difference between two sets of transaction stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. 
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/boltdb/bolt/tx_test.go b/vendor/github.com/boltdb/bolt/tx_test.go new file mode 100644 index 00000000000..2201e79288c --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx_test.go @@ -0,0 +1,716 @@ +package bolt_test + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "testing" + + "github.com/boltdb/bolt" +) + +// Ensure that committing a closed transaction returns an error. +func TestTx_Commit_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + if err := tx.Commit(); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that rolling back a closed transaction returns an error. +func TestTx_Rollback_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if err := tx.Rollback(); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that committing a read-only transaction returns an error. +func TestTx_Commit_ErrTxNotWritable(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != bolt.ErrTxNotWritable { + t.Fatal(err) + } +} + +// Ensure that a transaction can retrieve a cursor on the root bucket. +func TestTx_Cursor(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("woojits")); err != nil { + t.Fatal(err) + } + + c := tx.Cursor() + if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Next(); k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", k) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that creating a bucket with a read-only transaction returns an error. 
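Sub exists so callers can sample the counters periodically and report only the activity from the most recent interval. A sketch of that sampling loop using db.Stats().TxStats (the interval and log format are arbitrary choices, not part of the patch):

package example

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

// reportTxStats logs the transaction counters accumulated during each interval.
func reportTxStats(db *bolt.DB, every time.Duration) {
	prev := db.Stats().TxStats
	for {
		time.Sleep(every)
		cur := db.Stats().TxStats
		diff := cur.Sub(&prev)
		log.Printf("bolt: %d page allocations (%d bytes), %d writes in the last %s",
			diff.PageCount, diff.PageAlloc, diff.Write, every)
		prev = cur
	}
}
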
+func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.View(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("foo")) + if err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that creating a bucket on a closed transaction returns an error. +func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that a Tx can retrieve a bucket. +func TestTx_Bucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx retrieving a non-existent key returns nil. +func TestTx_Get_NotFound(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if b.Get([]byte("no_such_key")) != nil { + t.Fatal("expected nil value") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can be created and retrieved. +func TestTx_CreateBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Read the bucket through a separate transaction. + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can be created if it doesn't already exist. +func TestTx_CreateBucketIfNotExists(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + // Create bucket. + if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } + + // Create bucket again. + if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } + + return nil + }); err != nil { + t.Fatal(err) + } + + // Read the bucket through a separate transaction. + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure transaction returns an error if creating an unnamed bucket. 
+func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + + if _, err := tx.CreateBucketIfNotExists(nil); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket cannot be created twice. +func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Create the same bucket again. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != bolt.ErrBucketExists { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket is created with a non-blank name. +func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can be deleted. +func TestTx_DeleteBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create a bucket and add a value. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Delete the bucket and make sure we can't get the value. + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + if tx.Bucket([]byte("widgets")) != nil { + t.Fatal("unexpected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + // Create the bucket again and make sure there's not a phantom value. + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("unexpected phantom value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket on a closed transaction returns an error. +func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that deleting a bucket with a read-only transaction returns an error. +func TestTx_DeleteBucket_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that nothing happens when deleting a bucket that doesn't exist. 
+func TestTx_DeleteBucket_NotFound(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that no error is returned when a tx.ForEach function does not return +// an error. +func TestTx_ForEach_NoError(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return nil + }); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that an error is returned when a tx.ForEach function returns an error. +func TestTx_ForEach_WithError(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + marker := errors.New("marker") + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return marker + }); err != marker { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that Tx commit handlers are called after a transaction successfully commits. +func TestTx_OnCommit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var x int + if err := db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } else if x != 3 { + t.Fatalf("unexpected x: %d", x) + } +} + +// Ensure that Tx commit handlers are NOT called after a transaction rolls back. +func TestTx_OnCommit_Rollback(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var x int + if err := db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return errors.New("rollback this commit") + }); err == nil || err.Error() != "rollback this commit" { + t.Fatalf("unexpected error: %s", err) + } else if x != 0 { + t.Fatalf("unexpected x: %d", x) + } +} + +// Ensure that the database can be copied to a file path. 
+func TestTx_CopyFile(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + path := tempfile() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(path, 0600) + }); err != nil { + t.Fatal(err) + } + + db2, err := bolt.Open(path, 0600, nil) + if err != nil { + t.Fatal(err) + } + + if err := db2.View(func(tx *bolt.Tx) error { + if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { + t.Fatalf("unexpected value: %v", v) + } + if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db2.Close(); err != nil { + t.Fatal(err) + } +} + +type failWriterError struct{} + +func (failWriterError) Error() string { + return "error injected for tests" +} + +type failWriter struct { + // fail after this many bytes + After int +} + +func (f *failWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > f.After { + n = f.After + err = failWriterError{} + } + f.After -= n + return n, err +} + +// Ensure that Copy handles write errors right. +func TestTx_CopyFile_Error_Meta(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + return tx.Copy(&failWriter{}) + }); err == nil || err.Error() != "meta 0 copy: error injected for tests" { + t.Fatalf("unexpected error: %v", err) + } +} + +// Ensure that Copy handles write errors right. +func TestTx_CopyFile_Error_Normal(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + return tx.Copy(&failWriter{3 * db.Info().PageSize}) + }); err == nil || err.Error() != "error injected for tests" { + t.Fatalf("unexpected error: %v", err) + } +} + +func ExampleTx_Rollback() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Create a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + log.Fatal(err) + } + + // Set a value for a key. + if err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + }); err != nil { + log.Fatal(err) + } + + // Update the key but rollback the transaction so it never saves. 
+ tx, err := db.Begin(true) + if err != nil { + log.Fatal(err) + } + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("foo"), []byte("baz")); err != nil { + log.Fatal(err) + } + if err := tx.Rollback(); err != nil { + log.Fatal(err) + } + + // Ensure that our original value is still set. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value for 'foo' is still: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value for 'foo' is still: bar +} + +func ExampleTx_CopyFile() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Create a bucket and a key. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Copy the database to another file. + toFile := tempfile() + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(toFile, 0666) + }); err != nil { + log.Fatal(err) + } + defer os.Remove(toFile) + + // Open the cloned database. + db2, err := bolt.Open(toFile, 0666, nil) + if err != nil { + log.Fatal(err) + } + + // Ensure that the key exists in the copy. + if err := db2.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value for 'foo' in the clone is: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. 
+ if err := db.Close(); err != nil { + log.Fatal(err) + } + + if err := db2.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value for 'foo' in the clone is: bar +} diff --git a/vendor/github.com/containers/buildah/.travis.yml b/vendor/github.com/containers/buildah/.travis.yml index f6e2a97ec8d..ba3a3aa22a2 100644 --- a/vendor/github.com/containers/buildah/.travis.yml +++ b/vendor/github.com/containers/buildah/.travis.yml @@ -68,7 +68,7 @@ script: - docker ps --all - docker images - ls -alF /home/travis/auth - - docker pull alpine + - docker pull docker.io/alpine - echo testpassword | docker login localhost:5000 --username testuser --password-stdin - docker tag alpine localhost:5000/my-alpine - docker push localhost:5000/my-alpine diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index d473bdc5d4a..b9da496f135 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -1,26 +1,27 @@ SELINUXTAG := $(shell ./selinux_tag.sh) STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./ostree_tag.sh) SECURITYTAGS ?= seccomp $(SELINUXTAG) -TAGS ?= "$(SECURITYTAGS) $(STORAGETAGS)" +TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) +BUILDTAGS += $(TAGS) PREFIX := /usr/local BINDIR := $(PREFIX)/bin -BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions -BUILDFLAGS := -tags ${TAGS} +BASHINSTALLDIR = $(PREFIX)/share/bash-completion/completions +BUILDFLAGS := -tags "$(BUILDTAGS)" BUILDAH := buildah GO := go GO110 := 1.10 GOVERSION := $(findstring $(GO110),$(shell go version)) -GIT_COMMIT := $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) +GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) BUILD_INFO := $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) CNI_COMMIT := $(if $(shell sed -e '\,github.com/containernetworking/cni, !d' -e 's,.* ,,g' vendor.conf),$(shell sed -e '\,github.com/containernetworking/cni, !d' -e 's,.* ,,g' vendor.conf),$(error "sed failed")) -STATIC_STORAGETAGS="containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" +STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" RUNC_COMMIT := 2c632d1a2de0192c3f18a2542ccb6f30a8719b1f LIBSECCOMP_COMMIT := release-2.3 EXTRALDFLAGS := -LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} -X main.buildInfo=${BUILD_INFO} -X main.cniVersion=${CNI_COMMIT}' ${EXTRALDFLAGS} -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/cli/*.go pkg/parse/*.go unshare/*.c unshare/*.go util/*.go +LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(BUILD_INFO) -X main.cniVersion=$(CNI_COMMIT)' $(EXTRALDFLAGS) +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go unshare/*.c unshare/*.go util/*.go all: buildah imgtype docs @@ -30,7 +31,7 @@ static: $(SOURCES) .PHONY: binary binary: $(SOURCES) - $(GO) build $(LDFLAGS) -o ${BUILDAH} $(BUILDFLAGS) ./cmd/buildah + $(GO) build $(LDFLAGS) -o $(BUILDAH) $(BUILDFLAGS) ./cmd/buildah buildah: binary @@ -43,7 +44,7 @@ imgtype: *.go docker/*.go util/*.go tests/imgtype/imgtype.go .PHONY: clean clean: $(RM) -r buildah imgtype build buildah.static - $(MAKE) -C docs clean + $(MAKE) -C 
docs clean .PHONY: docs docs: ## build the docs on the host @@ -107,11 +108,11 @@ install: uninstall: rm -f $(DESTDIR)/$(BINDIR)/buildah rm -f $(PREFIX)/share/man/man1/buildah*.1 - rm -f $(DESTDIR)/${BASHINSTALLDIR}/buildah + rm -f $(DESTDIR)/$(BASHINSTALLDIR)/buildah .PHONY: install.completions install.completions: - install -m 644 -D contrib/completions/bash/buildah $(DESTDIR)/${BASHINSTALLDIR}/buildah + install -m 644 -D contrib/completions/bash/buildah $(DESTDIR)/$(BASHINSTALLDIR)/buildah .PHONY: install.runc install.runc: diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md index 2b539bba89e..12eafdf888e 100644 --- a/vendor/github.com/containers/buildah/README.md +++ b/vendor/github.com/containers/buildah/README.md @@ -105,6 +105,7 @@ $ sudo ./lighttpd.sh | [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. | | [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. | | [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. | +| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. | | [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. | | [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. | | [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. | diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 91ce2b09d41..cbdb5c9f959 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -317,6 +317,10 @@ type BuilderOptions struct { // the registry together, can not be resolved to a reference to a // source image. No separator is implicitly added. Transport string + // PullBlobDirectory is the name of a directory in which we'll attempt + // to store copies of layer blobs that we pull down, if any. It should + // already exist. + PullBlobDirectory string // Mount signals to NewBuilder() that the container should be mounted // immediately. 
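PullBlobDirectory is the new library-level knob for pointing pulls at a local blob directory. A rough sketch of a caller setting it when creating a working container; the helper name, image, and directory are placeholders, store setup is elided, and the surrounding buildah API is assumed rather than shown in this patch:

package example

import (
	"context"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

// newCachedBuilder creates a working container from image, caching pulled
// layer blobs in blobDir. blobDir must already exist; both arguments are
// values chosen by the caller.
func newCachedBuilder(ctx context.Context, store storage.Store, image, blobDir string) (*buildah.Builder, error) {
	opts := buildah.BuilderOptions{
		FromImage:         image,
		PullBlobDirectory: blobDir,
	}
	return buildah.NewBuilder(ctx, store, opts)
}
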
Mount bool diff --git a/vendor/github.com/containers/buildah/cmd/buildah/bud.go b/vendor/github.com/containers/buildah/cmd/buildah/bud.go index 631e7f851dc..65f262b49f4 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/bud.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/bud.go @@ -178,6 +178,11 @@ func budCmd(c *cli.Context) error { logrus.Debugf("--compress option specified but is ignored") } + compression := imagebuildah.Gzip + if c.Bool("disable-compression") { + compression = imagebuildah.Uncompressed + } + if c.IsSet("disable-content-trust") { logrus.Debugf("--disable-content-trust option specified but is ignored") } @@ -195,7 +200,7 @@ func budCmd(c *cli.Context) error { options := imagebuildah.BuildOptions{ ContextDirectory: contextDir, PullPolicy: pullPolicy, - Compression: imagebuildah.Gzip, + Compression: compression, Quiet: c.Bool("quiet"), SignaturePolicyPath: c.String("signature-policy"), Args: args, @@ -227,6 +232,7 @@ func budCmd(c *cli.Context) error { NoCache: c.Bool("no-cache"), RemoveIntermediateCtrs: c.BoolT("rm"), ForceRmIntermediateCtrs: c.Bool("force-rm"), + BlobDirectory: c.String("blob-cache"), } if c.Bool("quiet") { diff --git a/vendor/github.com/containers/buildah/cmd/buildah/commit.go b/vendor/github.com/containers/buildah/cmd/buildah/commit.go index 25e26aa3ab2..7799724d931 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/commit.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/commit.go @@ -23,6 +23,12 @@ var ( Name: "authfile", Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json", }, + cli.StringFlag{ + Name: "blob-cache", + Value: "", + Usage: "assume image blobs in the specified directory will be available for pushing", + Hidden: true, // this is here mainly so that we can test the API during integration tests + }, cli.StringFlag{ Name: "cert-dir", Value: "", @@ -165,6 +171,7 @@ func commitCmd(c *cli.Context) error { SystemContext: systemContext, IIDFile: c.String("iidfile"), Squash: c.Bool("squash"), + BlobDirectory: c.String("blob-cache"), } if !c.Bool("quiet") { options.ReportWriter = os.Stderr @@ -173,10 +180,14 @@ func commitCmd(c *cli.Context) error { if err != nil { return util.GetFailureCause(err, errors.Wrapf(err, "error committing container %q to %q", builder.Container, image)) } - if ref != nil { + if ref != nil && id != "" { logrus.Debugf("wrote image %s with ID %s", ref, id) - } else { + } else if ref != nil { + logrus.Debugf("wrote image %s", ref) + } else if id != "" { logrus.Debugf("wrote image with ID %s", id) + } else { + logrus.Debugf("wrote image") } if options.IIDFile == "" && id != "" { fmt.Printf("%s\n", id) diff --git a/vendor/github.com/containers/buildah/cmd/buildah/common.go b/vendor/github.com/containers/buildah/cmd/buildah/common.go index fbef58e8f88..1a8359c6527 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/common.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/common.go @@ -21,7 +21,7 @@ import ( var needToShutdownStore = false func getStore(c *cli.Context) (storage.Store, error) { - options, err := lu.GetDefaultStoreOptions() + options, _, err := lu.GetDefaultStoreOptions() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/buildah/cmd/buildah/config.go b/vendor/github.com/containers/buildah/cmd/buildah/config.go index a13241e2823..29078f0d39a 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/config.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/config.go @@ -244,7 
+244,11 @@ func updateConfig(builder *buildah.Builder, c *cli.Context) { builder.SetDomainname(c.String("domainname")) } if c.IsSet("hostname") { - builder.SetHostname(c.String("hostname")) + name := c.String("hostname") + if name != "" && builder.Format == buildah.OCIv1ImageManifest { + logrus.Errorf("HOSTNAME is not supported for OCI V1 image format, hostname %s will be ignored. Must use `docker` format", name) + } + builder.SetHostname(name) } if c.IsSet("onbuild") { for _, onbuild := range c.StringSlice("onbuild") { diff --git a/vendor/github.com/containers/buildah/cmd/buildah/containers.go b/vendor/github.com/containers/buildah/cmd/buildah/containers.go index d4b7d855335..ac14d766994 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/containers.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/containers.go @@ -80,6 +80,7 @@ var ( containersDescription = "Lists containers which appear to be " + buildah.Package + " working containers, their\n names and IDs, and the names and IDs of the images from which they were\n initialized" containersCommand = cli.Command{ Name: "containers", + Aliases: []string{"list", "ls", "ps"}, Usage: "List working containers and their base images", Description: containersDescription, Flags: sortFlags(containersFlags), diff --git a/vendor/github.com/containers/buildah/cmd/buildah/from.go b/vendor/github.com/containers/buildah/cmd/buildah/from.go index f2e420c91b2..b286a2e6c25 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/from.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/from.go @@ -243,6 +243,7 @@ func fromCmd(c *cli.Context) error { DropCapabilities: c.StringSlice("cap-drop"), CommonBuildOpts: commonOpts, Format: format, + PullBlobDirectory: c.String("blob-cache"), } if !c.Bool("quiet") { diff --git a/vendor/github.com/containers/buildah/cmd/buildah/images.go b/vendor/github.com/containers/buildah/cmd/buildah/images.go index 20ee505effe..f4dfd778925 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/images.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/images.go @@ -273,20 +273,8 @@ func outputImages(ctx context.Context, images []storage.Image, store storage.Sto // images without names should be printed with "" as the image name names = append(names, ":") } - if opts.quiet && argName == "" { - fmt.Printf("%-64s\n", image.ID) - // We only want to print each id once - continue - } - if opts.json && argName == "" { - JSONImage := jsonImage{ID: image.ID, Names: image.Names} - data, err2 := json.MarshalIndent(JSONImage, "", " ") - if err2 != nil { - return err2 - } - fmt.Printf("%s\n", data) - continue - } + + outer: for name, tags := range imagebuildah.ReposToMap(names) { for _, tag := range tags { if !matchesReference(name+":"+tag, argName) { @@ -300,7 +288,7 @@ func outputImages(ctx context.Context, images []storage.Image, store storage.Sto if opts.quiet { fmt.Printf("%-64s\n", image.ID) // We only want to print each id once - break + break outer } if opts.json { JSONImage := jsonImage{ID: image.ID, Names: image.Names} @@ -309,7 +297,8 @@ func outputImages(ctx context.Context, images []storage.Image, store storage.Sto return err2 } fmt.Printf("%s\n", data) - break + // We only want to print each id once + break outer } params := imageOutputParams{ Tag: tag, @@ -356,9 +345,9 @@ func matchesFilter(ctx context.Context, image storage.Image, store storage.Store } func matchesDangling(name string, dangling string) bool { - if dangling == "false" && name != "" { + if dangling == "false" && 
!strings.Contains(name, "") { return true - } else if dangling == "true" && name == "" { + } else if dangling == "true" && strings.Contains(name, "") { return true } return false diff --git a/vendor/github.com/containers/buildah/cmd/buildah/info.go b/vendor/github.com/containers/buildah/cmd/buildah/info.go new file mode 100644 index 00000000000..f610e1c08ba --- /dev/null +++ b/vendor/github.com/containers/buildah/cmd/buildah/info.go @@ -0,0 +1,105 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "regexp" + "runtime" + "text/template" + + "github.com/containers/buildah" + "github.com/containers/buildah/pkg/parse" + "github.com/pkg/errors" + "github.com/urfave/cli" + "golang.org/x/crypto/ssh/terminal" +) + +var ( + infoDescription = "The information displayed pertains to the host and current storage statistics which is useful when reporting issues." + infoFlags = []cli.Flag{ + cli.BoolFlag{ + Name: "debug, D", + Usage: "display additional debug information", + }, + cli.StringFlag{ + Name: "format", + Usage: "use `format` as a Go template to format the output", + }, + } + infoCommand = cli.Command{ + Name: "info", + Usage: "Display Buildah system information", + Description: infoDescription, + Action: infoCmd, + Flags: sortFlags(infoFlags), + SkipArgReorder: true, + UseShortOptionHandling: true, + } +) + +func infoCmd(c *cli.Context) error { + if len(c.Args()) > 0 { + return errors.New("'buildah info' does not accept arguments") + } + + if err := parse.ValidateFlags(c, infoFlags); err != nil { + return err + } + info := map[string]interface{}{} + + store, err := getStore(c) + if err != nil { + return err + } + + infoArr, err := buildah.Info(store) + if err != nil { + return errors.Wrapf(err, "error getting info") + } + + if c.Bool("debug") { + debugInfo := debugInfo(c) + infoArr = append(infoArr, buildah.InfoData{Type: "debug", Data: debugInfo}) + } + + for _, currInfo := range infoArr { + info[currInfo.Type] = currInfo.Data + } + + if c.IsSet("format") { + format := c.String("format") + if matched, err := regexp.MatchString("{{.*}}", format); err != nil { + return errors.Wrapf(err, "error validating format provided: %s", format) + } else if !matched { + return errors.Errorf("error invalid format provided: %s", format) + } + t, err := template.New("format").Parse(format) + if err != nil { + return errors.Wrapf(err, "Template parsing error") + } + if err = t.Execute(os.Stdout, info); err != nil { + return err + } + if terminal.IsTerminal(int(os.Stdout.Fd())) { + fmt.Println() + } + return nil + } + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + if terminal.IsTerminal(int(os.Stdout.Fd())) { + enc.SetEscapeHTML(false) + } + return enc.Encode(info) +} + +// top-level "debug" info +func debugInfo(c *cli.Context) map[string]interface{} { + info := map[string]interface{}{} + info["compiler"] = runtime.Compiler + info["go version"] = runtime.Version() + info["buildah version"] = buildah.Version + info["git commit"] = GitCommit + return info +} diff --git a/vendor/github.com/containers/buildah/cmd/buildah/main.go b/vendor/github.com/containers/buildah/cmd/buildah/main.go index 978f74881e6..b9c05cb0de5 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/main.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/main.go @@ -20,7 +20,7 @@ func main() { if buildah.InitReexec() { return } - storageOptions, err := util.GetDefaultStoreOptions() + storageOptions, _, err := util.GetDefaultStoreOptions() if err != nil { logrus.Errorf(err.Error()) cli.OsExiter(1) @@ 
-110,6 +110,7 @@ func main() { copyCommand, fromCommand, imagesCommand, + infoCommand, inspectCommand, mountCommand, pullCommand, diff --git a/vendor/github.com/containers/buildah/cmd/buildah/mount.go b/vendor/github.com/containers/buildah/cmd/buildah/mount.go index 34836e42375..b38440ddd8a 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/mount.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/mount.go @@ -48,6 +48,14 @@ func mountCmd(c *cli.Context) error { var lastError error if len(args) > 0 { + // Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part + // of the mount command. + // Differently, allow the mount if we are already in a userns, as the mount point will still + // be accessible once "buildah mount" exits. + if os.Getenv(startedInUserNS) != "" && store.GraphDriverName() != "vfs" { + return fmt.Errorf("cannot mount using driver %s in rootless mode", store.GraphDriverName()) + } + for _, name := range args { builder, err := openBuilder(getContext(), store, name) if err != nil { diff --git a/vendor/github.com/containers/buildah/cmd/buildah/pull.go b/vendor/github.com/containers/buildah/cmd/buildah/pull.go index 9d0d86dc791..83481a11683 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/pull.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/pull.go @@ -20,6 +20,12 @@ var ( Name: "authfile", Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json", }, + cli.StringFlag{ + Name: "blob-cache", + Value: "", + Usage: "store copies of pulled image blobs in the specified directory", + Hidden: true, // this is here mainly so that we can test the API during integration tests + }, cli.StringFlag{ Name: "cert-dir", Value: "", @@ -98,6 +104,7 @@ func pullCmd(c *cli.Context) error { SignaturePolicyPath: signaturePolicy, Store: store, SystemContext: systemContext, + BlobDirectory: c.String("blob-cache"), } if !c.Bool("quiet") { diff --git a/vendor/github.com/containers/buildah/cmd/buildah/push.go b/vendor/github.com/containers/buildah/cmd/buildah/push.go index 53d0d55c7c3..f5a539dc3e1 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/push.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/push.go @@ -25,6 +25,12 @@ var ( Name: "authfile", Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json", }, + cli.StringFlag{ + Name: "blob-cache", + Value: "", + Usage: "assume image blobs in the specified directory will be available for pushing", + Hidden: true, // this is here mainly so that we can test the API during integration tests + }, cli.StringFlag{ Name: "cert-dir", Value: "", @@ -160,6 +166,7 @@ func pushCmd(c *cli.Context) error { SignaturePolicyPath: c.String("signature-policy"), Store: store, SystemContext: systemContext, + BlobDirectory: c.String("blob-cache"), } if !c.Bool("quiet") { options.ReportWriter = os.Stderr diff --git a/vendor/github.com/containers/buildah/cmd/buildah/unshare.go b/vendor/github.com/containers/buildah/cmd/buildah/unshare.go index d6d9aff55db..514e0c33e64 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/unshare.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/unshare.go @@ -68,13 +68,16 @@ func maybeReexecUsingUserNamespace(c *cli.Context, evenForRoot bool) { return } + var uidNum, gidNum uint64 // Figure out who we are. 
me, err := user.Current() - bailOnError(err, "error determining current user") - uidNum, err := strconv.ParseUint(me.Uid, 10, 32) - bailOnError(err, "error parsing current UID %s", me.Uid) - gidNum, err := strconv.ParseUint(me.Gid, 10, 32) - bailOnError(err, "error parsing current GID %s", me.Gid) + if !os.IsNotExist(err) { + bailOnError(err, "error determining current user") + uidNum, err = strconv.ParseUint(me.Uid, 10, 32) + bailOnError(err, "error parsing current UID %s", me.Uid) + gidNum, err = strconv.ParseUint(me.Gid, 10, 32) + bailOnError(err, "error parsing current GID %s", me.Gid) + } runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -204,7 +207,7 @@ func unshareCmd(c *cli.Context) error { args = []string{shell} } cmd := exec.Command(args[0], args[1:]...) - cmd.Env = append(os.Environ(), "USER=root", "USERNAME=root", "GROUP=root", "LOGNAME=root", "UID=0", "GID=0") + cmd.Env = append(os.Environ(), "USER=root", "USERNAME=root", "GROUP=root", "LOGNAME=root", "UID=0", "GID=0", "_BUILDAH_STARTED_IN_USERNS=") cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/vendor/github.com/containers/buildah/cmd/buildah/unshare_unsupported.go b/vendor/github.com/containers/buildah/cmd/buildah/unshare_unsupported.go index 7f49c393e83..0738c4146d5 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/unshare_unsupported.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/unshare_unsupported.go @@ -6,6 +6,10 @@ import ( "github.com/urfave/cli" ) +const ( + startedInUserNS = "_BUILDAH_STARTED_IN_USERNS" +) + var ( unshareCommand = cli.Command{ Name: "unshare", diff --git a/vendor/github.com/containers/buildah/cmd/buildah/version.go b/vendor/github.com/containers/buildah/cmd/buildah/version.go index 9e424722828..b6b88368e01 100644 --- a/vendor/github.com/containers/buildah/cmd/buildah/version.go +++ b/vendor/github.com/containers/buildah/cmd/buildah/version.go @@ -16,7 +16,7 @@ import ( //Overwritten at build time var ( - gitCommit string + GitCommit string buildInfo string cniVersion string ) @@ -43,7 +43,7 @@ func versionCmd(c *cli.Context) error { fmt.Println("Runtime Spec: ", rspecs.Version) fmt.Println("CNI Spec: ", cniversion.Current()) fmt.Println("libcni Version: ", cniVersion) - fmt.Println("Git Commit: ", gitCommit) + fmt.Println("Git Commit: ", GitCommit) //Prints out the build time in readable format fmt.Println("Built: ", time.Unix(buildTime, 0).Format(time.ANSIC)) diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index 71696b432be..591ead4e6ab 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "time" + "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" cp "github.com/containers/image/copy" "github.com/containers/image/docker/reference" @@ -55,6 +56,12 @@ type CommitOptions struct { // Squash tells the builder to produce an image with a single layer // instead of with possibly more than one layer. Squash bool + // BlobDirectory is the name of a directory in which we'll look for + // prebuilt copies of layer blobs that we might otherwise need to + // regenerate from on-disk layers. If blobs are available, the + // manifest of the new image will reference the blobs rather than + // on-disk layers. 
+ BlobDirectory string // OnBuild is a list of commands to be run by images based on this image OnBuild []string @@ -85,6 +92,11 @@ type PushOptions struct { // ManifestType is the format to use when saving the imge using the 'dir' transport // possible options are oci, v2s1, and v2s2 ManifestType string + // BlobDirectory is the name of a directory in which we'll look for + // prebuilt copies of layer blobs that we might otherwise need to + // regenerate from on-disk layers, substituting them in the list of + // blobs to copy whenever possible. + BlobDirectory string } // Commit writes the contents of the container, along with its updated @@ -128,13 +140,37 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } } } - src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.Compression, options.HistoryTimestamp) + src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp) if err != nil { return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID) } + var maybeCachedSrc types.ImageReference = src + var maybeCachedDest types.ImageReference = dest + if options.BlobDirectory != "" { + compress := types.PreserveOriginal + if options.Compression != archive.Uncompressed { + compress = types.Compress + } + cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress) + if err != nil { + return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory) + } + maybeCachedSrc = cache + cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress) + if err != nil { + return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory) + } + maybeCachedDest = cache + } // "Copy" our image to where it needs to be. + switch options.Compression { + case archive.Uncompressed: + systemContext.OCIAcceptUncompressedLayers = true + case archive.Gzip: + systemContext.DirForceCompress = true + } var manifestBytes []byte - if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, "")); err != nil { + if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil { return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID) } if len(options.AdditionalTags) > 0 { @@ -209,10 +245,28 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options if err != nil { return nil, "", err } + var maybeCachedSrc types.ImageReference = src + if options.BlobDirectory != "" { + compress := types.PreserveOriginal + if options.Compression != archive.Uncompressed { + compress = types.Compress + } + cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress) + if err != nil { + return nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory) + } + maybeCachedSrc = cache + } // Copy everything. 
+ switch options.Compression { + case archive.Uncompressed: + systemContext.OCIAcceptUncompressedLayers = true + case archive.Gzip: + systemContext.DirForceCompress = true + } var manifestBytes []byte - if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, options.ManifestType)); err != nil { - return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(src), transports.ImageName(dest)) + if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil { + return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest)) } if options.ReportWriter != nil { fmt.Fprintf(options.ReportWriter, "") diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go index be59215df55..e369dc4076e 100644 --- a/vendor/github.com/containers/buildah/common.go +++ b/vendor/github.com/containers/buildah/common.go @@ -6,10 +6,8 @@ import ( "path/filepath" cp "github.com/containers/image/copy" - "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/containers/libpod/pkg/rootless" - "github.com/sirupsen/logrus" ) const ( @@ -34,13 +32,6 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference } } - sourceInsecure, err := isReferenceInsecure(sourceReference, sourceCtx) - if err != nil { - logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(sourceReference), err) - } else if sourceInsecure { - sourceCtx.DockerInsecureSkipTLSVerify = true - sourceCtx.OCIInsecureSkipTLSVerify = true - } destinationCtx := &types.SystemContext{} if destinationSystemContext != nil { @@ -52,13 +43,6 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference } } } - destinationInsecure, err := isReferenceInsecure(destinationReference, destinationCtx) - if err != nil { - logrus.Debugf("error determining if registry for %q is insecure: %v", transports.ImageName(destinationReference), err) - } else if destinationInsecure { - destinationCtx.DockerInsecureSkipTLSVerify = true - destinationCtx.OCIInsecureSkipTLSVerify = true - } return &cp.Options{ ReportWriter: reportWriter, diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index 3609694f670..114b8ca373f 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -474,9 +474,6 @@ func (b *Builder) Hostname() string { // Note: this setting is not present in the OCIv1 image format, so it is // discarded when writing images using OCIv1 formats. func (b *Builder) SetHostname(name string) { - if name != "" && b.Format != Dockerv2ImageManifest { - logrus.Errorf("HOSTNAME is not supported for OCI image format, hostname %s will be ignored. 
Must use `docker` format", name) - } b.Docker.Config.Hostname = name } diff --git a/vendor/github.com/containers/buildah/contrib/completions/bash/buildah b/vendor/github.com/containers/buildah/contrib/completions/bash/buildah index bbec42bf405..fda84885b1b 100644 --- a/vendor/github.com/containers/buildah/contrib/completions/bash/buildah +++ b/vendor/github.com/containers/buildah/contrib/completions/bash/buildah @@ -74,11 +74,11 @@ __buildah_previous_extglob_setting=$(shopt -p extglob) shopt -s extglob __buildah_list_containers() { - COMPREPLY=($(compgen -W "$(buildah containers -q)" -- $cur)) + COMPREPLY=($(compgen -W "$(buildah containers --format '{{.ContainerName}} {{.ContainerID}}' )" -- $cur)) } __buildah_list_images() { - COMPREPLY=($(compgen -W "$(buildah images -q)" -- $cur)) + COMPREPLY=($(compgen -W "$(buildah images --format '{{.ID}} {{.Name}}' )" -- $cur)) } __buildah_pos_first_nonflag() { @@ -404,6 +404,7 @@ return 1 --network --no-pivot --pid + --platform --runtime --runtime-flag --security-opt @@ -651,7 +652,19 @@ return 1 esac } - _buildah_containers() { + _buildah_ps() { + _buildah_containers + } + +_buildah_list() { + _buildah_containers + } + +_buildah_ls() { + _buildah_containers + } + +_buildah_containers() { local boolean_options=" --help -h @@ -711,6 +724,22 @@ return 1 esac } + _buildah_info() { + local options_with_args=" + --debug + --D + --format + " + + local all_options="$options_with_args" + + case "$cur" in + -*) + COMPREPLY=($(compgen -W "$options_with_args" -- "$cur")) + ;; + esac +} + _buildah_inspect() { local options_with_args=" --format @@ -846,10 +875,14 @@ return 1 delete from images + info inspect + list + ls mount pull push + ps rename rm rmi diff --git a/vendor/github.com/containers/buildah/docs/buildah-bud.md b/vendor/github.com/containers/buildah/docs/buildah-bud.md index ae347275ffb..69cbd64d879 100644 --- a/vendor/github.com/containers/buildah/docs/buildah-bud.md +++ b/vendor/github.com/containers/buildah/docs/buildah-bud.md @@ -170,6 +170,10 @@ The [username[:password]] to use to authenticate with the registry if required. If one or both values are not supplied, a command line prompt will appear and the value can be entered. The password is entered without echo. +**--disable-compression, -D** + +Don't default to compressing filesystem layers when building the image. + **--disable-content-trust** This is a Docker specific option to disable image verification to a Docker @@ -294,6 +298,12 @@ that the PID namespace in which `buildah` itself is being run should be reused, or it can be the path to a PID namespace which is already in use by another process. +**--platform**="Linux" + +This option has no effect on the build. Other container engines use this option +to control the execution platform for the build (e.g., Windows, Linux) which is +not required for Buildah as it supports only Linux. + **--pull** Pull the image if it is not present. If this flag is disabled (with diff --git a/vendor/github.com/containers/buildah/docs/buildah-commit.md b/vendor/github.com/containers/buildah/docs/buildah-commit.md index b2d3021f4dd..870ae6f8b6b 100644 --- a/vendor/github.com/containers/buildah/docs/buildah-commit.md +++ b/vendor/github.com/containers/buildah/docs/buildah-commit.md @@ -34,7 +34,7 @@ value can be entered. The password is entered without echo. **--disable-compression, -D** -Don't compress filesystem layers when building the image. +Don't default to compressing filesystem layers when building the image. 
**--format** diff --git a/vendor/github.com/containers/buildah/docs/buildah-info.md b/vendor/github.com/containers/buildah/docs/buildah-info.md new file mode 100644 index 00000000000..2731db4de70 --- /dev/null +++ b/vendor/github.com/containers/buildah/docs/buildah-info.md @@ -0,0 +1,71 @@ +# buildah-info "1" "November 2018" "Buildah" + +## NAME +buildah\-info - Display Buildah system information. + +## SYNOPSIS +**buildah info** [*options*] + +## DESCRIPTION +The information displayed pertains to the host and current storage statistics which is useful when reporting issues. + +## OPTIONS +**--debug, -D** +Show additional information. + +**--format** *template* + +Use *template* as a Go template when formatting the output. + +## EXAMPLE +Run buildah info response: +``` +$ buildah info +{ + "host": { + "Distribution": { + "distribution": "ubuntu", + "version": "18.04" + }, + "MemTotal": 16702980096, + "MenFree": 309428224, + "SwapFree": 2146693120, + "SwapTotal": 2147479552, + "arch": "amd64", + "cpus": 4, + "hostname": "localhost.localdomain", + "kernel": "4.15.0-36-generic", + "os": "linux", + "rootless": false, + "uptime": "91h 30m 59.9s (Approximately 3.79 days)" + }, + "store": { + "ContainerStore": { + "number": 2 + }, + "GraphDriverName": "overlay", + "GraphOptions": [ + "overlay.override_kernel_check=true" + ], + "GraphRoot": "/var/lib/containers/storage", + "GraphStatus": { + "Backing Filesystem": "extfs", + "Native Overlay Diff": "true", + "Supports d_type": "true" + }, + "ImageStore": { + "number": 1 + }, + "RunRoot": "/var/run/containers/storage" + } +} +``` + +Run buildah info and retrieve only the store information: +``` +$ buildah info --format={{".store"}} +map[GraphOptions:[overlay.override_kernel_check=true] GraphStatus:map[Backing Filesystem:extfs Supports d_type:true Native Overlay Diff:true] ImageStore:map[number:1] ContainerStore:map[number:2] GraphRoot:/var/lib/containers/storage RunRoot:/var/run/containers/storage GraphDriverName:overlay] +``` + +## SEE ALSO +buildah(1) diff --git a/vendor/github.com/containers/buildah/docs/buildah-mount.md b/vendor/github.com/containers/buildah/docs/buildah-mount.md index 86867b003a9..a43aacb499f 100644 --- a/vendor/github.com/containers/buildah/docs/buildah-mount.md +++ b/vendor/github.com/containers/buildah/docs/buildah-mount.md @@ -13,6 +13,14 @@ accessed from the host, and returns its location. If the mount command is invoked without any arguments, the tool will list all of the currently mounted containers. +When running in rootless mode, mount runs in a different namespace so +that the mounted volume might not be accessible from the host when +using a driver different than `vfs`. To be able to access the file +system mounted, you might need to create the mount namespace +separately as part of `buildah unshare`. In the environment created +with `buildah unshare` you can then use `buildah mount` and have +access to the mounted file system. + ## RETURN VALUE The location of the mounted file system. On error an empty string and errno is returned. diff --git a/vendor/github.com/containers/buildah/docs/buildah-push.md b/vendor/github.com/containers/buildah/docs/buildah-push.md index 478dd180a92..014edfb8cd8 100644 --- a/vendor/github.com/containers/buildah/docs/buildah-push.md +++ b/vendor/github.com/containers/buildah/docs/buildah-push.md @@ -30,7 +30,7 @@ Image stored in local container/storage An image is stored in the `docker save` formatted file. 
_docker-reference_ is only used when creating such a file, and it must not contain a digest. **docker-daemon:**_docker-reference_ - An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID). + An image _docker_reference_ stored in the docker daemon internal storage. If _docker_reference_ does not begin with a valid registry name (a domain name containing "." or the reserved name "localhost") then the default registry name "docker.io" will be prepended. _docker_reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID). **oci:**_path_**:**_tag_ An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_. diff --git a/vendor/github.com/containers/buildah/docs/buildah.md b/vendor/github.com/containers/buildah/docs/buildah.md index c220d4e942f..86072f77190 100644 --- a/vendor/github.com/containers/buildah/docs/buildah.md +++ b/vendor/github.com/containers/buildah/docs/buildah.md @@ -104,8 +104,10 @@ Print the version | buildah-copy(1) | Copies the contents of a file, URL, or directory into a container's working directory. | | buildah-from(1) | Creates a new working container, either from scratch or using a specified image as a starting point. | | buildah-images(1) | List images in local storage. | +| buildah-info(1) | Display Buildah system information. | | buildah-inspect(1) | Inspects the configuration of a container or image | | buildah-mount(1) | Mount the working container's root filesystem. | +| buildah-pull(1) | Pull an image from the specified location. | | buildah-push(1) | Push an image from local storage to elsewhere. | | buildah-rename(1) | Rename a local container. | | buildah-rm(1) | Removes one or more working containers. | diff --git a/vendor/github.com/containers/buildah/docs/release-announcements/v1.5.md b/vendor/github.com/containers/buildah/docs/release-announcements/v1.5.md new file mode 100644 index 00000000000..76ca92f18ca --- /dev/null +++ b/vendor/github.com/containers/buildah/docs/release-announcements/v1.5.md @@ -0,0 +1,81 @@ +# Buildah version 1.5 Release Announcement + +![buildah logo](https://buildah.io/images/buildah.png) + +We're pleased to announce the release of Buildah version 1.5 which is now available from GitHub for any Linux distro. We are shipping this release on Fedora, RHEL 7, CentOS, openSUSE and Ubuntu in the near future. + +The Buildah project has continued to grow over the past several weeks, welcoming several new contributors to the mix. Updates were made to rootless user handling, added support for a few Dockerfile commands that were missing, a number of performance changes for the underlying pull commands and bug fixes. + +## The major highlights for this release are: + +* A variety of updates were made in rootless user handling: + - Let runc auto-detect rootless capability. This will allow for different runtimes to run in rootless mode. + - If slirp4netns is available, it is now used to configure the network for the rootless isolation mode. + - Support for fuse-overlayfs has been added. + - Usernamespaces are now created only when they are needed. + +* Dockerfile handling improvements. + - If the ARG command was the first line in the Dockerfile, Buildah did not process it. This has been corrected. 
+ - The “FROM {image} AS {reference}” command in a Dockerfile is now supported. + +* A number of performance changes have been made to the underlying pull functionality. + +* If a directory was passed the ‘bud’ commands --file parameter a panic would occur even if a Dockerfile was in the directory. This has been corrected. + +## Release Changes + +* rootless: do not specify --rootless to the OCI runtime. +* rootless: use slirp4netns to setup the network namespace. +* rootless: only discard network configuration names. +* run: only set up /etc/hosts or /etc/resolv.conf with network. +* run: bind mount /etc/hosts and /etc/resolv.conf if not in a volume. +* Handle directories better in ‘bud --file’. +* common: support a per-user registries conf file. +* unshare: do not override the configuration. +* common: honor the rootless configuration file. +* unshare: create a new mount namespace. +* unshare: support libpod rootless pkg. +* Use libpod GetDefaultStorage to report proper storage config. +* Allow container storage to manage the SELinux labels. +* When the value of isolation is provided to the run command, use the value provided instead of the default value. +* Fix ‘buildah version’ error when build time could not be determined on some systems. +* Walk symlinks when checking cached images for copied/added files. +* ReserveSELinuxLabels(): handle wrapped errors from OpenBuilder. +* Set WorkingDir to empty, not ‘/’ for conformance to Docker. +* Allow setting --no-pivot default with an env var. +* Add the --no-pivot flag to the run command. +* Improve reporting about individual pull failures. +* Return a "search registries were needed but empty" indication in util.ResolveName. +* Simplify handling of the "tried to pull an image but found nothing" case. +* Don't even invoke the pull loop if options.FromImage == "". +* Eliminate the long-running ref and img variables in resolveImage. +* In resolveImage, return immediately on success. +* Fix From As in Dockerfile. +* Sort CLI flags of buildah bud. +* unshare: detect when unprivileged userns are disabled. +* chroot: fix the args check. +* buildah: use the same logic for XDG_RUNTIME_DIR as podman. +* Podman --privileged selinux is broken. +* parse: Modify the return value. +* parse: modify the verification of the isolation value. +* Make sure we log or return every error. +* pullImage(): when completing an image name, try docker://. +* Enforce "blocked" for registries for the "docker" transport. +* Correctly set DockerInsecureSkipTLSVerify when pulling images. +* chroot: set up seccomp and capabilities after supplemental groups. +* chroot: fix capabilities list setup and application. +* namespaces.bats: fix handling of uidmap/gidmap options in pairs. +* chroot: only create user namespaces when we know we need them. +* Check /proc/sys/user/max_user_namespaces on unshare(NEWUSERNS). +* bash/buildah: add isolation option to the from command for bash completions. +* Updated the following packages to newer versions: containers/image, containers/storage, libpod, and opencontainers/selinux. +* Plus a number of smaller fixes. + +## Try it Out. + +If you haven’t yet, install Buildah from one of the Linux repos or GitHub and give it a spin. We’re betting you'll find it’s an easy and quick way to build containers in your environment without a daemon being involved! + +For those of you who contributed to this release, thank you very much for your contributions! If you haven't joined our community yet, don't wait any longer! 
Come join us in GitHub, where Open Source communities live. + +## Buildah == Simplicity + diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index c0bf90ddda1..47842db7d0e 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -57,22 +57,24 @@ type containerImageRef struct { squash bool tarPath func(path string) (io.ReadCloser, error) parent string + blobDirectory string } type containerImageSource struct { - path string - ref *containerImageRef - store storage.Store - containerID string - mountLabel string - layerID string - names []string - compression archive.Compression - config []byte - configDigest digest.Digest - manifest []byte - manifestType string - exporting bool + path string + ref *containerImageRef + store storage.Store + containerID string + mountLabel string + layerID string + names []string + compression archive.Compression + config []byte + configDigest digest.Digest + manifest []byte + manifestType string + exporting bool + blobDirectory string } func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) { @@ -105,11 +107,11 @@ func expectedDockerDiffIDs(image docker.V2Image) int { // Compute the media types which we need to attach to a layer, given the type of // compression that we'll be applying. -func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) { +func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) { omediaType = v1.MediaTypeImageLayer dmediaType = docker.V2S2MediaTypeUncompressedLayer - if i.compression != archive.Uncompressed { - switch i.compression { + if layerCompression != archive.Uncompressed { + switch layerCompression { case archive.Gzip: omediaType = v1.MediaTypeImageLayerGzip dmediaType = manifest.DockerV2Schema2LayerMediaType @@ -280,19 +282,21 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // The default layer media type assumes no compression. omediaType := v1.MediaTypeImageLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer + // Look up this layer. + layer, err := i.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) + } // If we're not re-exporting the data, and we're reusing layers individually, reuse // the blobsum and diff IDs. if !i.exporting && !i.squash && layerID != i.layerID { - layer, err2 := i.store.Layer(layerID) - if err2 != nil { - return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) - } if layer.UncompressedDigest == "" { return nil, errors.Errorf("unable to look up size of layer %q", layerID) } layerBlobSum := layer.UncompressedDigest layerBlobSize := layer.UncompressedSize - // Note this layer in the manifest, using the uncompressed blobsum. + diffID := layer.UncompressedDigest + // Note this layer in the manifest, using the appropriate blobsum. olayerDescriptor := v1.Descriptor{ MediaType: omediaType, Digest: layerBlobSum, @@ -305,13 +309,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System Size: layerBlobSize, } dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Note this layer in the list of diffIDs, again using the uncompressed blobsum. 
- oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum) + // Note this layer in the list of diffIDs, again using the uncompressed digest. + oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID) + dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID) continue } - // Figure out if we need to change the media type, in case we're using compression. - omediaType, dmediaType, err = i.computeLayerMIMEType(what) + // Figure out if we need to change the media type, in case we've changed the compression. + omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression) if err != nil { return nil, err } @@ -368,8 +372,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } logrus.Debugf("%s size is %d bytes", what, size) // Rename the layer so that we can more easily find it by digest later. - if err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())); err != nil { - return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) + finalBlobName := filepath.Join(path, destHasher.Digest().String()) + if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil { + return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName) } // Add a note in the manifest about the layer. The blobs are identified by their possibly- // compressed blob digests. @@ -472,19 +477,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System panic("unreachable code: unsupported manifest type") } src = &containerImageSource{ - path: path, - ref: i, - store: i.store, - containerID: i.containerID, - mountLabel: i.mountLabel, - layerID: i.layerID, - names: i.names, - compression: i.compression, - config: config, - configDigest: digest.Canonical.FromBytes(config), - manifest: imageManifest, - manifestType: manifestType, - exporting: i.exporting, + path: path, + ref: i, + store: i.store, + containerID: i.containerID, + mountLabel: i.mountLabel, + layerID: i.layerID, + names: i.names, + compression: i.compression, + config: config, + configDigest: digest.Canonical.FromBytes(config), + manifest: imageManifest, + manifestType: manifestType, + exporting: i.exporting, + blobDirectory: i.blobDirectory, } return src, nil } @@ -551,7 +557,11 @@ func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.B return nil, nil } -func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) { +func (i *containerImageSource) HasThreadSafeGetBlob() bool { + return false +} + +func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) { if blob.Digest == i.configDigest { logrus.Debugf("start reading config") reader := bytes.NewReader(i.config) @@ -561,7 +571,16 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) } return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil } - layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600) + var layerFile *os.File + for _, path := range []string{i.blobDirectory, i.path} { + layerFile, err = os.OpenFile(filepath.Join(path, blob.Digest.String()), os.O_RDONLY, 
0600) + if err == nil { + break + } + if !os.IsNotExist(err) { + logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), path, err) + } + } if err != nil { logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) return nil, -1, errors.Wrapf(err, "error opening file %q to buffer layer blob", filepath.Join(i.path, blob.Digest.String())) @@ -584,7 +603,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil } -func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { +func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { var name reference.Named container, err := b.store.Container(b.ContainerID) if err != nil { @@ -630,6 +649,7 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa squash: squash, tarPath: b.tarPath(), parent: parent, + blobDirectory: blobDirectory, } return ref, nil } diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 618a0ff6f4c..217bcfc799b 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -10,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "strconv" "strings" "time" @@ -168,6 +169,8 @@ type BuildOptions struct { // ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if // the build was unsuccessful. ForceRmIntermediateCtrs bool + // BlobDirectory is a directory which we'll use for caching layer blobs. + BlobDirectory string } // Executor is a buildah-based implementation of the imagebuilder.Executor @@ -224,10 +227,23 @@ type Executor struct { containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build imageMap map[string]string // Used to map images that we create to handle the AS construct. copyFrom string // Used to keep track of the --from flag from COPY and ADD + blobDirectory string +} + +// builtinAllowedBuildArgs is list of built-in allowed build args +var builtinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, } // withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME. 
-func (b *Executor) withName(name string, index int) *Executor { +func (b *Executor) withName(name string, index int, from string) *Executor { if b.named == nil { b.named = make(map[string]*Executor) } @@ -236,6 +252,7 @@ func (b *Executor) withName(name string, index int) *Executor { copied.name = name child := &copied b.named[name] = child + b.named[from] = child if idx := strconv.Itoa(index); idx != name { b.named[idx] = child } @@ -500,6 +517,7 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error { Hostname: config.Hostname, Runtime: b.runtime, Args: b.runtimeArgs, + NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "", Mounts: convertMounts(b.transientMounts), Env: config.Env, User: config.User, @@ -597,6 +615,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { noCache: options.NoCache, removeIntermediateCtrs: options.RemoveIntermediateCtrs, forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, + blobDirectory: options.BlobDirectory, } if exec.err == nil { exec.err = os.Stderr @@ -652,6 +671,7 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s PullPolicy: b.pullPolicy, Registry: b.registry, Transport: b.transport, + PullBlobDirectory: b.blobDirectory, SignaturePolicyPath: b.signaturePolicyPath, ReportWriter: b.reportWriter, SystemContext: b.systemContext, @@ -794,12 +814,28 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error commitName := b.output b.containerIDs = nil + var leftoverArgs []string + for arg := range b.builder.Args { + if !builtinAllowedBuildArgs[arg] { + leftoverArgs = append(leftoverArgs, arg) + } + } for i, node := range node.Children { step := ib.Step() if err := step.Resolve(node); err != nil { return errors.Wrapf(err, "error resolving step %+v", *node) } logrus.Debugf("Parsed Step: %+v", *step) + if step.Command == "arg" { + for index, arg := range leftoverArgs { + for _, Arg := range step.Args { + list := strings.SplitN(Arg, "=", 2) + if arg == list[0] { + leftoverArgs = append(leftoverArgs[:index], leftoverArgs[index+1:]...) + } + } + } + } if !b.quiet { b.log("%s", step.Original) } @@ -896,6 +932,9 @@ func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error } } } + if len(leftoverArgs) > 0 { + fmt.Fprintf(b.out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } return nil } @@ -1196,6 +1235,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created SystemContext: b.systemContext, IIDFile: b.iidfile, Squash: b.squash, + BlobDirectory: b.blobDirectory, Parent: b.builder.FromImageID, } imgID, ref, _, err := b.builder.Commit(ctx, imageRef, options) @@ -1221,8 +1261,16 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin b.imageMap = make(map[string]string) stageCount := 0 for _, stage := range stages { - stageExecutor = b.withName(stage.Name, stage.Position) - if err := stageExecutor.Prepare(ctx, stage, ""); err != nil { + ib := stage.Builder + node := stage.Node + base, err := ib.From(node) + if err != nil { + logrus.Debugf("Build(node.Children=%#v)", node.Children) + return "", nil, err + } + + stageExecutor = b.withName(stage.Name, stage.Position, base) + if err := stageExecutor.Prepare(ctx, stage, base); err != nil { return "", nil, err } // Always remove the intermediate/build containers, even if the build was unsuccessful. 
@@ -1260,7 +1308,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin var imageRef reference.Canonical imageID := "" - if !b.layers && !b.noCache { + + // Check if we have a one line Dockerfile making layers irrelevant + // or the user told us to ignore layers. + ignoreLayers := (len(stages) < 2 && len(stages[0].Node.Children) < 2) || (!b.layers && !b.noCache) + + if ignoreLayers { imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "") if err != nil { return "", nil, err @@ -1361,6 +1414,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt dockerfiles = append(dockerfiles, data) } + + dockerfiles = processCopyFrom(dockerfiles) + mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0]) if err != nil { return "", nil, errors.Wrapf(err, "error parsing main Dockerfile") @@ -1384,6 +1440,80 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt return exec.Build(ctx, stages) } +// processCopyFrom goes through the Dockerfiles and handles any 'COPY --from' instances +// prepending a new FROM statement the Dockerfile that do not already have a corresponding +// FROM command within them. +func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser { + + var newDockerfiles []io.ReadCloser + // fromMap contains the names of the images seen in a FROM + // line in the Dockerfiles. The boolean value just completes the map object. + fromMap := make(map[string]bool) + // asMap contains the names of the images seen after a "FROM image AS" + // line in the Dockefiles. The boolean value just completes the map object. + asMap := make(map[string]bool) + + copyRE := regexp.MustCompile(`\s*COPY\s+--from=`) + fromRE := regexp.MustCompile(`\s*FROM\s+`) + asRE := regexp.MustCompile(`(?i)\s+as\s+`) + for _, dfile := range dockerfiles { + if dfileBinary, err := ioutil.ReadAll(dfile); err == nil { + dfileString := fmt.Sprintf("%s", dfileBinary) + copyFromContent := copyRE.Split(dfileString, -1) + // no "COPY --from=", just continue + if len(copyFromContent) < 2 { + newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString))) + continue + } + // Load all image names in our Dockerfiles into a map + // for easy reference later. + fromContent := fromRE.Split(dfileString, -1) + for i := 0; i < len(fromContent); i++ { + imageName := strings.Split(fromContent[i], " ") + if len(imageName) > 0 { + finalImage := strings.Split(imageName[0], "\n") + if finalImage[0] != "" { + fromMap[strings.TrimSpace(finalImage[0])] = true + } + } + } + logrus.Debug("fromMap: ", fromMap) + + // Load all image names associated with an 'as' or 'AS' in + // our Dockerfiles into a map for easy reference later. + asContent := asRE.Split(dfileString, -1) + // Skip the first entry in the array as it's stuff before + // the " as " and we don't care. + for i := 1; i < len(asContent); i++ { + asName := strings.Split(asContent[i], " ") + if len(asName) > 0 { + finalAsImage := strings.Split(asName[0], "\n") + if finalAsImage[0] != "" { + asMap[strings.TrimSpace(finalAsImage[0])] = true + } + } + } + logrus.Debug("asMap: ", asMap) + + for i := 1; i < len(copyFromContent); i++ { + fromArray := strings.Split(copyFromContent[i], " ") + // If the image isn't a stage number or already declared, + // add a FROM statement for it to the top of our Dockerfile. 
+ trimmedFrom := strings.TrimSpace(fromArray[0]) + _, okFrom := fromMap[trimmedFrom] + _, okAs := asMap[trimmedFrom] + _, err := strconv.Atoi(trimmedFrom) + if !okFrom && !okAs && err != nil { + from := "FROM " + trimmedFrom + newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(from))) + } + } + newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString))) + } // End if dfileBinary, err := ioutil.ReadAll(dfile); err == nil + } // End for _, dfile := range dockerfiles { + return newDockerfiles +} + // deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs // and deletes the containers associated with that ID. func (b *Executor) deleteSuccessfulIntermediateCtrs() error { diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go index edb5837dbb9..6feedf6a56c 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go +++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go @@ -131,6 +131,11 @@ func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) { func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) { var timeIsGreater bool + // the Walk below doesn't work if rootdir and path are equal + if rootdir == path { + return false, nil + } + // Convert historyTime from string to time.Time for comparison histTime, err := time.Parse(time.RFC3339Nano, historyTime) if err != nil { diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go new file mode 100644 index 00000000000..8cd5e4438fc --- /dev/null +++ b/vendor/github.com/containers/buildah/info.go @@ -0,0 +1,207 @@ +package buildah + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "runtime" + "strconv" + "strings" + "time" + + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/storage" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// InfoData holds the info type, i.e store, host etc and the data for each type +type InfoData struct { + Type string + Data map[string]interface{} +} + +// Info returns the store and host information +func Info(store storage.Store) ([]InfoData, error) { + info := []InfoData{} + // get host information + hostInfo, err := hostInfo() + if err != nil { + logrus.Error(err, "error getting host info") + } + info = append(info, InfoData{Type: "host", Data: hostInfo}) + + // get store information + storeInfo, err := storeInfo(store) + if err != nil { + logrus.Error(err, "error getting store info") + } + info = append(info, InfoData{Type: "store", Data: storeInfo}) + return info, nil +} + +func hostInfo() (map[string]interface{}, error) { + info := map[string]interface{}{} + info["os"] = runtime.GOOS + info["arch"] = runtime.GOARCH + info["cpus"] = runtime.NumCPU() + info["rootless"] = rootless.IsRootless() + mi, err := system.ReadMemInfo() + if err != nil { + logrus.Error(err, "err reading memory info") + info["MemTotal"] = "" + info["MenFree"] = "" + info["SwapTotal"] = "" + info["SwapFree"] = "" + } else { + info["MemTotal"] = mi.MemTotal + info["MenFree"] = mi.MemFree + info["SwapTotal"] = mi.SwapTotal + info["SwapFree"] = mi.SwapFree + } + hostDistributionInfo := getHostDistributionInfo() + info["Distribution"] = map[string]interface{}{ + "distribution": hostDistributionInfo["Distribution"], + "version": hostDistributionInfo["Version"], + } + + kv, err := 
readKernelVersion() + if err != nil { + logrus.Error(err, "error reading kernel version") + } + info["kernel"] = kv + + up, err := readUptime() + if err != nil { + logrus.Error(err, "error reading up time") + } + // Convert uptime in seconds to a human-readable format + upSeconds := up + "s" + upDuration, err := time.ParseDuration(upSeconds) + if err != nil { + logrus.Error(err, "error parsing system uptime") + } + + hoursFound := false + var timeBuffer bytes.Buffer + var hoursBuffer bytes.Buffer + for _, elem := range upDuration.String() { + timeBuffer.WriteRune(elem) + if elem == 'h' || elem == 'm' { + timeBuffer.WriteRune(' ') + if elem == 'h' { + hoursFound = true + } + } + if !hoursFound { + hoursBuffer.WriteRune(elem) + } + } + + info["uptime"] = timeBuffer.String() + if hoursFound { + hours, err := strconv.ParseFloat(hoursBuffer.String(), 64) + if err == nil { + days := hours / 24 + info["uptime"] = fmt.Sprintf("%s (Approximately %.2f days)", info["uptime"], days) + } + } + + host, err := os.Hostname() + if err != nil { + logrus.Error(err, "error getting hostname") + } + info["hostname"] = host + + return info, nil + +} + +// top-level "store" info +func storeInfo(store storage.Store) (map[string]interface{}, error) { + // lets say storage driver in use, number of images, number of containers + info := map[string]interface{}{} + info["GraphRoot"] = store.GraphRoot() + info["RunRoot"] = store.RunRoot() + info["GraphDriverName"] = store.GraphDriverName() + info["GraphOptions"] = store.GraphOptions() + statusPairs, err := store.Status() + if err != nil { + return nil, err + } + status := map[string]string{} + for _, pair := range statusPairs { + status[pair[0]] = pair[1] + } + info["GraphStatus"] = status + images, err := store.Images() + if err != nil { + logrus.Error(err, "error getting number of images") + } + info["ImageStore"] = map[string]interface{}{ + "number": len(images), + } + + containers, err := store.Containers() + if err != nil { + logrus.Error(err, "error getting number of containers") + } + info["ContainerStore"] = map[string]interface{}{ + "number": len(containers), + } + + return info, nil +} + +func readKernelVersion() (string, error) { + buf, err := ioutil.ReadFile("/proc/version") + if err != nil { + return "", err + } + f := bytes.Fields(buf) + if len(f) < 2 { + return string(bytes.TrimSpace(buf)), nil + } + return string(f[2]), nil +} + +func readUptime() (string, error) { + buf, err := ioutil.ReadFile("/proc/uptime") + if err != nil { + return "", err + } + f := bytes.Fields(buf) + if len(f) < 1 { + return "", fmt.Errorf("invalid uptime") + } + return string(f[0]), nil +} + +// getHostDistributionInfo returns a map containing the host's distribution and version +func getHostDistributionInfo() map[string]string { + dist := make(map[string]string) + + // Populate values in case we cannot find the values + // or the file + dist["Distribution"] = "unknown" + dist["Version"] = "unknown" + + f, err := os.Open("/etc/os-release") + if err != nil { + return dist + } + defer f.Close() + + l := bufio.NewScanner(f) + for l.Scan() { + if strings.HasPrefix(l.Text(), "ID=") { + dist["Distribution"] = strings.TrimPrefix(l.Text(), "ID=") + } + if strings.HasPrefix(l.Text(), "VERSION_ID=") { + dist["Version"] = strings.Trim(strings.TrimPrefix(l.Text(), "VERSION_ID="), "\"") + } + } + return dist +} diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 13bebf4206d..7e7f97e4935 100644 --- 
a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -34,6 +34,7 @@ func pullAndFindImage(ctx context.Context, store storage.Store, imageName string Store: store, SystemContext: options.SystemContext, Transport: options.Transport, + BlobDirectory: options.PullBlobDirectory, } ref, err := pullImage(ctx, store, imageName, pullOptions, sc) if err != nil { diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go new file mode 100644 index 00000000000..31e6a428c38 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -0,0 +1,535 @@ +package blobcache + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/containers/buildah/docker" + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + "github.com/containers/image/manifest" + "github.com/containers/image/transports" + "github.com/containers/image/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + _ types.ImageReference = &blobCacheReference{} + _ types.ImageSource = &blobCacheSource{} + _ types.ImageDestination = &blobCacheDestination{} +) + +const ( + compressedNote = ".compressed" + decompressedNote = ".decompressed" +) + +// BlobCache is an object which saves copies of blobs that are written to it while passing them +// through to some real destination, and which can be queried directly in order to read them +// back. +type BlobCache interface { + types.ImageReference + // HasBlob checks if a blob that matches the passed-in digest (and + // size, if not -1), is present in the cache. + HasBlob(types.BlobInfo) (bool, int64, error) + // Directories returns the list of cache directories. + Directory() string + // ClearCache() clears the contents of the cache directories. Note + // that this also clears content which was not placed there by this + // cache implementation. + ClearCache() error +} + +type blobCacheReference struct { + reference types.ImageReference + // WARNING: The contents of this directory may be accessed concurrently, + // both within this process and by multiple different processes + directory string + compress types.LayerCompression +} + +type blobCacheSource struct { + reference *blobCacheReference + source types.ImageSource + sys types.SystemContext + // this mutex synchronizes the counters below + mu sync.Mutex + cacheHits int64 + cacheMisses int64 + cacheErrors int64 +} + +type blobCacheDestination struct { + reference *blobCacheReference + destination types.ImageDestination +} + +func makeFilename(blobSum digest.Digest, isConfig bool) string { + if isConfig { + return blobSum.String() + ".config" + } + return blobSum.String() +} + +// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are +// written to the destination image created from the resulting reference will also be stored +// as-is to the specifed directory or a temporary directory. The cache directory's contents +// can be cleared by calling the returned BlobCache()'s ClearCache() method. 
+// The compress argument controls whether or not the cache will try to substitute a compressed +// or different version of a blob when preparing the list of layers when reading an image. +func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) { + if directory == "" { + return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) + } + switch compress { + case types.Compress, types.Decompress, types.PreserveOriginal: + // valid value, accept it + default: + return nil, errors.Errorf("unhandled LayerCompression value %v", compress) + } + return &blobCacheReference{ + reference: ref, + directory: directory, + compress: compress, + }, nil +} + +func (r *blobCacheReference) Transport() types.ImageTransport { + return r.reference.Transport() +} + +func (r *blobCacheReference) StringWithinTransport() string { + return r.reference.StringWithinTransport() +} + +func (r *blobCacheReference) DockerReference() reference.Named { + return r.reference.DockerReference() +} + +func (r *blobCacheReference) PolicyConfigurationIdentity() string { + return r.reference.PolicyConfigurationIdentity() +} + +func (r *blobCacheReference) PolicyConfigurationNamespaces() []string { + return r.reference.PolicyConfigurationNamespaces() +} + +func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return r.reference.DeleteImage(ctx, sys) +} + +func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { + if blobinfo.Digest == "" { + return false, -1, nil + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig)) + fileInfo, err := os.Stat(filename) + if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { + return true, fileInfo.Size(), nil + } + if !os.IsNotExist(err) { + return false, -1, errors.Wrapf(err, "error checking size of %q", filename) + } + } + + return false, -1, nil +} + +func (r *blobCacheReference) Directory() string { + return r.directory +} + +func (r *blobCacheReference) ClearCache() error { + f, err := os.Open(r.directory) + if err != nil { + return errors.Wrapf(err, "error opening directory %q", r.directory) + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + return errors.Wrapf(err, "error reading directory %q", r.directory) + } + for _, name := range names { + pathname := filepath.Join(r.directory, name) + if err = os.RemoveAll(pathname); err != nil { + return errors.Wrapf(err, "error removing %q while clearing cache for %q", pathname, transports.ImageName(r)) + } + } + return nil +} + +func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference)) + } + return image.FromSource(ctx, sys, src) +} + +func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + src, err := r.reference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference)) + } + logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress) + return &blobCacheSource{reference: r, source: src, 
sys: *sys}, nil +} + +func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + dest, err := r.reference.NewImageDestination(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(r.reference)) + } + logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory) + return &blobCacheDestination{reference: r, destination: dest}, nil +} + +func (s *blobCacheSource) Reference() types.ImageReference { + return s.reference +} + +func (s *blobCacheSource) Close() error { + logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) + return s.source.Close() +} + +func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) + manifestBytes, err := ioutil.ReadFile(filename) + if err == nil { + s.cacheHits++ + return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil + } + if !os.IsNotExist(err) { + s.cacheErrors++ + return nil, "", errors.Wrapf(err, "error checking for manifest file %q", filename) + } + } + s.cacheMisses++ + return s.source.GetManifest(ctx, instanceDigest) +} + +func (s *blobCacheSource) HasThreadSafeGetBlob() bool { + return s.source.HasThreadSafeGetBlob() +} + +func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + present, size, err := s.reference.HasBlob(blobinfo) + if err != nil { + return nil, -1, err + } + if present { + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + s.mu.Lock() + s.cacheHits++ + s.mu.Unlock() + return f, size, nil + } + if !os.IsNotExist(err) { + s.mu.Lock() + s.cacheErrors++ + s.mu.Unlock() + return nil, -1, errors.Wrapf(err, "error checking for cache file %q", filepath.Join(s.reference.directory, filename)) + } + } + } + s.mu.Lock() + s.cacheMisses++ + s.mu.Unlock() + rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) + if err != nil { + return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) + } + return rc, size, nil +} + +func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.source.GetSignatures(ctx, instanceDigest) +} + +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignatures(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) + } + canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) + + infos, err := s.source.LayerInfosForCopy(ctx) + if err != nil { + return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + if infos == nil { + image, err := s.reference.NewImage(ctx, &s.sys) + if err != nil { + return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + defer 
image.Close() + infos = image.LayerInfos() + } + + if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { + replacedInfos := make([]types.BlobInfo, 0, len(infos)) + for _, info := range infos { + var replaceDigest []byte + var err error + blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) + alternate := "" + switch s.reference.compress { + case types.Compress: + alternate = blobFile + compressedNote + replaceDigest, err = ioutil.ReadFile(alternate) + case types.Decompress: + alternate = blobFile + decompressedNote + replaceDigest, err = ioutil.ReadFile(alternate) + } + if err == nil && digest.Digest(replaceDigest).Validate() == nil { + alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) + fileInfo, err := os.Stat(alternate) + if err == nil { + logrus.Debugf("suggesting cached blob with digest %q and compression %v in place of blob with digest %q", string(replaceDigest), s.reference.compress, info.Digest.String()) + info.Digest = digest.Digest(replaceDigest) + info.Size = fileInfo.Size() + switch info.MediaType { + case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: + switch s.reference.compress { + case types.Compress: + info.MediaType = v1.MediaTypeImageLayerGzip + case types.Decompress: + info.MediaType = v1.MediaTypeImageLayer + } + case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType: + switch s.reference.compress { + case types.Compress: + info.MediaType = manifest.DockerV2Schema2LayerMediaType + case types.Decompress: + info.MediaType = docker.V2S2MediaTypeUncompressedLayer + } + } + } + } + replacedInfos = append(replacedInfos, info) + } + infos = replacedInfos + } + + return infos, nil +} + +func (d *blobCacheDestination) Reference() types.ImageReference { + return d.reference +} + +func (d *blobCacheDestination) Close() error { + logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) + return d.destination.Close() +} + +func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { + return d.destination.SupportedManifestMIMETypes() +} + +func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { + return d.destination.SupportsSignatures(ctx) +} + +func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { + return d.destination.DesiredLayerCompression() +} + +func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { + return d.destination.AcceptsForeignLayerURLs() +} + +func (d *blobCacheDestination) MustMatchRuntimeOS() bool { + return d.destination.MustMatchRuntimeOS() +} + +func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { + return d.destination.IgnoresEmbeddedDockerReference() +} + +// Decompress and save the contents of the decompressReader stream into the passed-in temporary +// file. If we successfully save all of the data, rename the file to match the digest of the data, +// and make notes about the relationship between the file that holds a copy of the compressed data +// and this new file. +func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { + defer wg.Done() + // Decompress from and digest the reading end of that pipe. 
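	// archive.DecompressStream sniffs the compression format from the stream itself, and
	// digest.Canonical is SHA-256, so the io.MultiWriter copy below writes the uncompressed
	// bytes to tempFile while computing the blob's uncompressed digest in a single pass.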
+ decompressed, err3 := archive.DecompressStream(decompressReader) + digester := digest.Canonical.Digester() + if err3 == nil { + // Read the decompressed data through the filter over the pipe, blocking until the + // writing end is closed. + _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) + } else { + // Drain the pipe to keep from stalling the PutBlob() thread. + io.Copy(ioutil.Discard, decompressReader) + } + decompressReader.Close() + decompressed.Close() + tempFile.Close() + // Determine the name that we should give to the uncompressed copy of the blob. + decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) + if err3 == nil { + // Rename the temporary file. + if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { + logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) + // Remove the temporary file. + if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } else { + *alternateDigest = digester.Digest() + // Note the relationship between the two files. + if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { + logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) + } + if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { + logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) + } + } + } else { + // Remove the temporary file. 
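	// This branch runs when decompressing or copying the stream failed: the partial copy is
	// discarded and alternateDigest is left unset, so no decompressed variant gets recorded
	// for this blob.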
+ if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } +} + +func (s *blobCacheDestination) HasThreadSafePutBlob() bool { + return s.destination.HasThreadSafePutBlob() +} + +func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + var tempfile *os.File + var err error + var n int + var alternateDigest digest.Digest + wg := new(sync.WaitGroup) + defer wg.Wait() + compression := archive.Uncompressed + if inputInfo.Digest != "" { + filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err == nil { + stream = io.TeeReader(stream, tempfile) + defer func() { + if err == nil { + if err = os.Rename(tempfile.Name(), filename); err != nil { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) + } + } else { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + } + tempfile.Close() + }() + } else { + logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) + } + if !isConfig { + initial := make([]byte, 8) + n, err = stream.Read(initial) + if n > 0 { + // Build a Reader that will still return the bytes that we just + // read, for PutBlob()'s sake. + stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) + if n >= len(initial) { + compression = archive.DetectCompression(initial[:n]) + } + if compression != archive.Uncompressed { + // The stream is compressed, so create a file which we'll + // use to store a decompressed copy. + decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err2 != nil { + logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) + decompressedTemp.Close() + } else { + // Write a copy of the compressed data to a pipe, + // closing the writing end of the pipe after + // PutBlob() returns. + decompressReader, decompressWriter := io.Pipe() + defer decompressWriter.Close() + stream = io.TeeReader(stream, decompressWriter) + // Let saveStream() close the reading end and handle the temporary file. 
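	// PutBlob()'s deferred calls run in LIFO order: decompressWriter is closed before
	// wg.Wait() runs, which lets the io.Copy() in saveStream() see EOF, so PutBlob() can't
	// return until the decompressed copy has been written out and renamed (or cleaned up).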
+ wg.Add(1) + go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) + } + } + } + } + } + newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) + if err != nil { + return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) + } + if alternateDigest.Validate() == nil { + logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) + } else { + logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) + } + return newBlobInfo, nil +} + +func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) + if err != nil || present { + return present, reusedInfo, err + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + defer f.Close() + uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) + if err != nil { + return false, types.BlobInfo{}, err + } + return true, uploadedInfo, nil + } + } + + return false, types.BlobInfo{}, nil +} + +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte) error { + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) + } else { + filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) + if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { + logrus.Warnf("error saving manifest as %q: %v", filename, err) + } + } + return d.destination.PutManifest(ctx, manifestBytes) +} + +func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { + return d.destination.PutSignatures(ctx, signatures) +} + +func (d *blobCacheDestination) Commit(ctx context.Context) error { + return d.destination.Commit(ctx) +} diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache_test.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache_test.go new file mode 100644 index 00000000000..620d6f4d9cf --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache_test.go @@ -0,0 +1,241 @@ +package blobcache + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + cp "github.com/containers/image/copy" + "github.com/containers/image/pkg/blobinfocache" + "github.com/containers/image/signature" + "github.com/containers/image/transports/alltransports" + "github.com/containers/image/types" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func TestMain(m *testing.M) { + flag.Parse() + if testing.Verbose() { + logrus.SetLevel(logrus.DebugLevel) + } + os.Exit(m.Run()) +} + +// Create a layer containing a single file with the specified name (and its +// name as its contents), compressed using the specified compression type, and +// return the . 
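// It returns the compressed layer bytes and the digest of the uncompressed data (the layer's diffID).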
+func makeLayer(filename string, repeat int, compression archive.Compression) ([]byte, digest.Digest, error) { + var compressed, uncompressed bytes.Buffer + layer, err := archive.Generate(filename, strings.Repeat(filename, repeat)) + if err != nil { + return nil, "", err + } + writer, err := archive.CompressStream(&compressed, compression) + if err != nil { + return nil, "", err + } + reader := io.TeeReader(layer, &uncompressed) + _, err = io.Copy(writer, reader) + writer.Close() + if err != nil { + return nil, "", err + } + return compressed.Bytes(), digest.FromBytes(uncompressed.Bytes()), nil +} + +func TestBlobCache(t *testing.T) { + cacheDir, err := ioutil.TempDir("", "blobcache") + if err != nil { + t.Fatalf("error creating persistent cache directory: %v", err) + } + defer func() { + if err := os.RemoveAll(cacheDir); err != nil { + t.Fatalf("error removing persistent cache directory %q: %v", cacheDir, err) + } + }() + + for _, repeat := range []int{1, 10, 100, 1000, 10000} { + for _, desiredCompression := range []types.LayerCompression{types.PreserveOriginal, types.Compress, types.Decompress} { + for _, layerCompression := range []archive.Compression{archive.Uncompressed, archive.Gzip} { + // Create a layer with the specified layerCompression. + blobBytes, diffID, err := makeLayer(fmt.Sprintf("layer-content-%d", int(layerCompression)), repeat, layerCompression) + blobInfo := types.BlobInfo{ + Digest: digest.FromBytes(blobBytes), + Size: int64(len(blobBytes)), + } + // Create a configuration that includes the diffID for the layer and not much else. + config := v1.Image{ + RootFS: v1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{diffID}, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + t.Fatalf("error encoding image configuration: %v", err) + } + configInfo := types.BlobInfo{ + Digest: digest.FromBytes(configBytes), + Size: int64(len(configBytes)), + } + // Create a manifest that uses this configuration and layer. + manifest := v1.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: v1.Descriptor{ + Digest: configInfo.Digest, + Size: configInfo.Size, + }, + Layers: []v1.Descriptor{{ + Digest: blobInfo.Digest, + Size: blobInfo.Size, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + t.Fatalf("error encoding image manifest: %v", err) + } + // Write this image to a "dir" destination with blob caching using this directory. 
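	// (The "dir:" transport stores the image as loose files under srcdir - one file per blob,
	// plus the manifest and a "version" file - while every blob and manifest that passes
	// through the cache is also written into cacheDir, which is what the checks below count
	// and compare.)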
+ srcdir, err := ioutil.TempDir("", "blobcache-source") + if err != nil { + t.Fatalf("error creating temporary source directory: %v", err) + } + defer os.RemoveAll(srcdir) + srcName := "dir:" + srcdir + srcRef, err := alltransports.ParseImageName(srcName) + if err != nil { + t.Fatalf("error parsing source image name %q: %v", srcName, err) + } + cachedSrcRef, err := NewBlobCache(srcRef, cacheDir, desiredCompression) + if err != nil { + t.Fatalf("failed to wrap reference in cache: %v", err) + } + destImage, err := cachedSrcRef.NewImageDestination(context.TODO(), nil) + if err != nil { + t.Fatalf("error opening source image for writing: %v", err) + } + _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(blobBytes), blobInfo, blobinfocache.NoCache, false) + if err != nil { + t.Fatalf("error writing layer blob to source image: %v", err) + } + _, err = destImage.PutBlob(context.TODO(), bytes.NewReader(configBytes), configInfo, blobinfocache.NoCache, true) + if err != nil { + t.Fatalf("error writing config blob to source image: %v", err) + } + err = destImage.PutManifest(context.TODO(), manifestBytes) + if err != nil { + t.Fatalf("error writing manifest to source image: %v", err) + } + err = destImage.Commit(context.TODO()) + if err != nil { + t.Fatalf("error committing source image: %v", err) + } + if err = destImage.Close(); err != nil { + t.Fatalf("error closing source image: %v", err) + } + // Check that the cache was populated. + cache, err := os.Open(cacheDir) + if err != nil { + t.Fatalf("error opening cache directory %q: %v", cacheDir, err) + } + defer cache.Close() + cachedNames, err := cache.Readdirnames(-1) + if err != nil { + t.Fatalf("error reading contents of cache directory %q: %v", cacheDir, err) + } + // Expect a layer blob, a config blob, and the manifest. + expected := 3 + if layerCompression != archive.Uncompressed { + // Expect a compressed blob, an uncompressed blob, notes for each about the other, a config blob, and the manifest. + expected = 6 + } + if len(cachedNames) != expected { + t.Fatalf("expected %d items in cache directory %q, got %d: %v", expected, cacheDir, len(cachedNames), cachedNames) + } + // Check that the blobs were all correctly stored. + for _, cachedName := range cachedNames { + if digest.Digest(cachedName).Validate() == nil { + cacheMember := filepath.Join(cacheDir, cachedName) + cacheMemberBytes, err := ioutil.ReadFile(cacheMember) + if err != nil { + t.Fatalf("error reading cache member %q: %v", cacheMember, err) + } + if digest.FromBytes(cacheMemberBytes).String() != cachedName { + t.Fatalf("cache member %q was stored incorrectly!", cacheMember) + } + } + } + // Clear out anything in the source directory that probably isn't a manifest, so that we'll + // have to depend on the cached copies of some of the blobs. + srcNameDir, err := os.Open(srcdir) + if err != nil { + t.Fatalf("error opening source directory %q: %v", srcdir, err) + } + defer srcNameDir.Close() + srcNames, err := srcNameDir.Readdirnames(-1) + if err != nil { + t.Fatalf("error reading contents of source directory %q: %v", srcdir, err) + } + for _, name := range srcNames { + if !strings.HasPrefix(name, "manifest") { + os.Remove(filepath.Join(srcdir, name)) + } + } + // Now that we've deleted some of the contents, try to copy from the source image + // to a second image. It should fail because the source is missing some blobs. 
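	// The first cp.Image() below goes through the bare srcRef and should fail (typically with
	// a does-not-exist error); the second goes through cachedSrcRef, whose GetBlob() falls back
	// to the copies saved in cacheDir. Callers outside of tests wrap references the same way;
	// a minimal sketch, with the destination reference, cache directory, and copy options assumed:
	//
	//	cached, err := blobcache.NewBlobCache(destRef, "/var/cache/blobs", types.PreserveOriginal)
	//	if err != nil {
	//		return err
	//	}
	//	_, err = cp.Image(ctx, policyContext, cached, srcRef, &copyOptions)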
+ destdir, err := ioutil.TempDir("", "blobcache-destination") + if err != nil { + t.Fatalf("error creating temporary destination directory: %v", err) + } + defer os.RemoveAll(destdir) + destName := "dir:" + destdir + destRef, err := alltransports.ParseImageName(destName) + if err != nil { + t.Fatalf("error parsing destination image name %q: %v", destName, err) + } + systemContext := types.SystemContext{} + options := cp.Options{ + SourceCtx: &systemContext, + DestinationCtx: &systemContext, + } + policyContext, err := signature.NewPolicyContext(&signature.Policy{ + Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}, + }) + if err != nil { + t.Fatalf("error creating signature policy context: %v", err) + } + _, err = cp.Image(context.TODO(), policyContext, destRef, srcRef, &options) + if err == nil { + t.Fatalf("expected an error copying the image, but got success") + } else { + if os.IsNotExist(errors.Cause(err)) { + t.Logf("ok: got expected does-not-exist error copying the image with blobs missing: %v", err) + } else { + t.Logf("got an error copying the image with missing blobs, but not sure which error: %v", err) + } + } + _, err = cp.Image(context.TODO(), policyContext, destRef, cachedSrcRef, &options) + if err != nil { + t.Fatalf("unexpected error copying the image using the cache: %v", err) + } + if err = cachedSrcRef.ClearCache(); err != nil { + t.Fatalf("error clearing cache: %v", err) + } + } + } + } +} diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index 03b34029414..dc1b2bc6975 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -111,6 +111,10 @@ var ( Value: "", Usage: "use `[username[:password]]` for accessing the registry", }, + cli.BoolFlag{ + Name: "disable-compression, D", + Usage: "don't compress layers by default", + }, cli.BoolFlag{ Name: "disable-content-trust", Usage: "This is a Docker specific option and is a NOOP", @@ -144,6 +148,10 @@ var ( Name: "loglevel", Usage: "adjust logging level (range from -2 to 3)", }, + cli.StringFlag{ + Name: "platform", + Usage: "CLI compatibility: no action or effect", + }, cli.BoolTFlag{ Name: "pull", Usage: "pull the image if not present", @@ -192,6 +200,12 @@ var ( Name: "add-host", Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])", }, + cli.StringFlag{ + Name: "blob-cache", + Value: "", + Usage: "assume image blobs in the specified directory will be available for pushing", + Hidden: true, // this is here mainly so that we can test the API during integration tests + }, cli.StringSliceFlag{ Name: "cap-add", Usage: "add the specified capability when running (default [])", diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index b87eb95c741..41fdea8b1ba 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -282,7 +282,7 @@ func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) { DockerCertPath: c.String("cert-dir"), } if c.IsSet("tls-verify") { - ctx.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify") + ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify")) ctx.OCIInsecureSkipTLSVerify = !c.BoolT("tls-verify") ctx.DockerDaemonInsecureSkipTLSVerify = !c.BoolT("tls-verify") } diff --git a/vendor/github.com/containers/buildah/pull.go 
b/vendor/github.com/containers/buildah/pull.go index 1a51edb0e69..e677c8925e7 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -5,6 +5,7 @@ import ( "io" "strings" + "github.com/containers/buildah/pkg/blobcache" "github.com/containers/buildah/util" cp "github.com/containers/image/copy" "github.com/containers/image/docker/reference" @@ -40,6 +41,10 @@ type PullOptions struct { // image name alone can not be resolved to a reference to a source // image. No separator is implicitly added. Transport string + // BlobDirectory is the name of a directory in which we'll attempt to + // store copies of layer blobs that we pull down, if any. It should + // already exist. + BlobDirectory string } func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) { @@ -182,6 +187,14 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio if err != nil { return nil, errors.Wrapf(err, "error parsing image name %q", destName) } + var maybeCachedDestRef types.ImageReference = destRef + if options.BlobDirectory != "" { + cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal) + if err != nil { + return nil, errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(destRef), options.BlobDirectory) + } + maybeCachedDestRef = cachedRef + } policy, err := signature.DefaultPolicy(sc) if err != nil { @@ -200,7 +213,7 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio }() logrus.Debugf("copying %q to %q", spec, destName) - if _, err := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, destRef, nil, "")); err != nil { + if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil { logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", spec, destName, err) return nil, err } diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 5d2cd6a3220..a0627f4892a 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -26,6 +26,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/stringid" units "github.com/docker/go-units" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/runtime-spec/specs-go" @@ -944,10 +945,25 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) g.SetHostname(options.Hostname) } else if b.Hostname() != "" { g.SetHostname(b.Hostname()) + } else { + g.SetHostname(stringid.TruncateID(b.ContainerID)) } } else { g.SetHostname("") } + + found := false + spec := g.Spec() + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") { + found = true + break + } + } + if !found { + spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) + } + return configureNetwork, configureNetworks, nil } diff --git a/vendor/github.com/containers/buildah/tests/blobcache.bats b/vendor/github.com/containers/buildah/tests/blobcache.bats new file mode 100644 index 00000000000..74613209284 --- /dev/null +++ 
b/vendor/github.com/containers/buildah/tests/blobcache.bats @@ -0,0 +1,320 @@ +#!/usr/bin/env bats + +load helpers + +@test "blobcache-pull" { + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + # Pull an image using a fresh directory for the blob cache. + run buildah pull --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json docker.io/kubernetes/pause + echo "$output" + [ "$status" -eq 0 ] + # Check that we dropped some files in there. + run find ${blobcachedir} -type f + echo "$output" + [ "$status" -eq 0 ] + [ "${#lines[@]}" -gt 0 ] +} + +@test "blobcache-from" { + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + # Pull an image using a fresh directory for the blob cache. + run buildah from --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json docker.io/kubernetes/pause + echo "$output" + [ "$status" -eq 0 ] + # Check that we dropped some files in there. + run find ${blobcachedir} -type f + echo "$output" + [ "$status" -eq 0 ] + [ "${#lines[*]}" -gt 0 ] +} + +@test "blobcache-commit" { + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + # Pull an image using a fresh directory for the blob cache. + run buildah --debug=false from --quiet --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json docker.io/kubernetes/pause + echo "$output" + ctr="$output" + [ "$status" -eq 0 ] + run buildah add ${ctr} ${TESTSDIR}/bud/add-file/file / + echo "$output" + [ "$status" -eq 0 ] + # Commit the image without using the blob cache. + doomeddir=${TESTDIR}/doomed + mkdir -p ${doomeddir} + ls -l ${blobcachedir} + echo buildah commit --signature-policy ${TESTSDIR}/policy.json ${ctr} dir:${doomeddir} + run buildah commit --signature-policy ${TESTSDIR}/policy.json ${ctr} dir:${doomeddir} + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the destination that match the ones in the cache. + matched=0 + unmatched=0 + for content in ${doomeddir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 0 ] # nothing should match items in the cache + [ ${unmatched} -eq 6 ] # nothing should match items in the cache + # Commit the image using the blob cache. + destdir=${TESTDIR}/dest + mkdir -p ${destdir} + ls -l ${blobcachedir} + echo buildah commit --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${ctr} dir:${destdir} + run buildah commit --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${ctr} dir:${destdir} + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the destination that match the ones in the cache. 
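  # Every file in the destination is cmp'd against every file in the blob cache: blobs that
  # were written straight out of the cache are byte-for-byte identical, while the "version"
  # file is generated for each copy and never has a counterpart in the cache.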
+ matched=0 + unmatched=0 + for content in ${destdir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 5 ] # the base layers, our new layer, our config, and manifest should match items in the cache + [ ${unmatched} -eq 1 ] # the version shouldn't match an item in the cache +} + +@test "blobcache-push" { + target=targetimage + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + # Pull an image using a fresh directory for the blob cache. + run buildah --debug=false from --quiet --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json docker.io/kubernetes/pause + echo "$output" + ctr="$output" + [ "$status" -eq 0 ] + run buildah add ${ctr} ${TESTSDIR}/bud/add-file/file / + echo "$output" + [ "$status" -eq 0 ] + # Commit the image using the blob cache. + ls -l ${blobcachedir} + echo buildah commit --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${ctr} ${target} + run buildah commit --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${ctr} ${target} + echo "$output" + [ "$status" -eq 0 ] + # Try to push the image without the blob cache. + doomeddir=${TESTDIR}/doomed + mkdir -p ${doomeddir} + ls -l ${blobcachedir} + echo buildah push --signature-policy ${TESTSDIR}/policy.json ${target} dir:${doomeddir} + run buildah push --signature-policy ${TESTSDIR}/policy.json ${target} dir:${doomeddir} + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the doomed copy that match the ones in the cache. + matched=0 + unmatched=0 + for content in ${doomeddir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 2 ] # basically, only the config and our new layer should be the same as anything in the cache + [ ${unmatched} -eq 4 ] # none of the version, manifest, and base layers should match items in the cache, since the base layers were recompressed + # Now try to push the image using the blob cache. + destdir=${TESTDIR}/dest + mkdir -p ${destdir} + ls -l ${blobcachedir} + echo buildah push --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${target} dir:${destdir} + run buildah push --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${target} dir:${destdir} + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the destination that match the ones in the cache. 
+ matched=0 + unmatched=0 + for content in ${destdir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 5 ] # the base image's layers, our new layer, the config, and the manifest should already have been cached + [ ${unmatched} -eq 1 ] # expected mismatch is only the "version" +} + +@test "blobcache-build-compressed-using-dockerfile-explicit-push" { + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + target=new-image + # Build an image while pulling the base image. + run buildah build-using-dockerfile -t ${target} --pull-always --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json ${TESTSDIR}/bud/add-file + echo "$output" + [ "$status" -eq 0 ] + # Now try to push the image using the blob cache. + destdir=${TESTDIR}/dest + mkdir -p ${destdir} + run buildah push --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${target} dir:${destdir} + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the destination that match the ones in the cache. + matched=0 + unmatched=0 + for content in ${destdir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 4 ] # the config, the base layer, our new layer, and the manifest should match the cache + [ ${unmatched} -eq 1 ] # the only expected mismatch should be "version" +} + +@test "blobcache-build-uncompressed-using-dockerfile-explicit-push" { + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + target=new-image + # Build an image while pulling the base image. + run buildah build-using-dockerfile -t ${target} -D --pull-always --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json ${TESTSDIR}/bud/add-file + echo "$output" + [ "$status" -eq 0 ] + # Now try to push the image using the blob cache. + destdir=${TESTDIR}/dest + mkdir -p ${destdir} + run buildah push --signature-policy ${TESTSDIR}/policy.json --blob-cache=${blobcachedir} ${target} dir:${destdir} + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the destination that match the ones in the cache. + matched=0 + unmatched=0 + for content in ${destdir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 2 ] # the config and previously-compressed base layer should match the cache + [ ${unmatched} -eq 3 ] # expected "version", our new layer, and the manifest to mismatch +} + +@test "blobcache-build-compressed-using-dockerfile-implicit-push" { + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + target=new-image + destdir=${TESTDIR}/dest + mkdir -p ${destdir} + # Build an image while pulling the base image, implicitly pushing while writing. 
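  # ("-t dir:${destdir}" names a transport destination rather than a local tag, so
  #  build-using-dockerfile writes the finished image straight to that directory and no
  #  separate "buildah push" step is needed.)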
+ run buildah build-using-dockerfile -t dir:${destdir} --pull-always --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json ${TESTSDIR}/bud/add-file + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the destination that match the ones in the cache. + matched=0 + unmatched=0 + for content in ${destdir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 4 ] # the layers from the base image, our layer, our config, and our manifest should match items in the cache + [ ${unmatched} -eq 1 ] # expect only the "version" to not match the cache +} + +@test "blobcache-build-uncompressed-using-dockerfile-implicit-push" { + blobcachedir=${TESTDIR}/cache + mkdir -p ${blobcachedir} + target=new-image + destdir=${TESTDIR}/dest + mkdir -p ${destdir} + # Build an image while pulling the base image, implicitly pushing while writing. + run buildah build-using-dockerfile -t dir:${destdir} -D --pull-always --blob-cache=${blobcachedir} --signature-policy ${TESTSDIR}/policy.json ${TESTSDIR}/bud/add-file + echo "$output" + [ "$status" -eq 0 ] + # Look for layer blobs in the destination that match the ones in the cache. + matched=0 + unmatched=0 + for content in ${destdir}/* ; do + match=false + for blob in ${blobcachedir}/* ; do + if cmp -s ${content} ${blob} ; then + echo $(file ${blob}) and ${content} have the same contents, was cached + match=true + break + fi + done + if ${match} ; then + matched=$(( ${matched} + 1 )) + else + unmatched=$(( ${unmatched} + 1 )) + echo ${content} was not cached + fi + done + [ ${matched} -eq 4 ] # the layers from the base image, our layer, our config, and our manifest should match items in the cache + [ ${unmatched} -eq 1 ] # expect only the "version" to not match the cache +} diff --git a/vendor/github.com/containers/buildah/tests/bud.bats b/vendor/github.com/containers/buildah/tests/bud.bats index 74897e09c44..60ead7eee80 100644 --- a/vendor/github.com/containers/buildah/tests/bud.bats +++ b/vendor/github.com/containers/buildah/tests/bud.bats @@ -336,6 +336,50 @@ load helpers [ "$status" -eq 0 ] } +@test "bud-multi-stage-builds-small-as" { + target=multi-stage-index + buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/multi-stage-builds-small-as/Dockerfile.index ${TESTSDIR}/bud/multi-stage-builds-small-as + cid=$(buildah from ${target}) + root=$(buildah mount ${cid}) + cmp $root/Dockerfile.index ${TESTSDIR}/bud/multi-stage-builds-small-as/Dockerfile.index + run test -s $root/etc/passwd + [ "$status" -eq 0 ] + buildah rm ${cid} + buildah rmi -a + run buildah --debug=false images -q + echo "$output" + [ "$status" -eq 0 ] + [ "$output" = "" ] + + target=multi-stage-name + buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} -f Dockerfile.name ${TESTSDIR}/bud/multi-stage-builds-small-as + cid=$(buildah from ${target}) + root=$(buildah mount ${cid}) + cmp $root/Dockerfile.name ${TESTSDIR}/bud/multi-stage-builds-small-as/Dockerfile.name + run test -s $root/etc/passwd + [ "$status" -ne 0 ] + buildah rm ${cid} + buildah rmi $(buildah --debug=false images -q) + run buildah --debug=false images -q + echo "$output" + [ "$output" = "" ] + [ "$status" -eq 0 ] + + target=multi-stage-mixed + buildah bud 
--signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/multi-stage-builds-small-as/Dockerfile.mixed ${TESTSDIR}/bud/multi-stage-builds-small-as + cid=$(buildah from ${target}) + root=$(buildah mount ${cid}) + cmp $root/Dockerfile.name ${TESTSDIR}/bud/multi-stage-builds-small-as/Dockerfile.name + cmp $root/Dockerfile.index ${TESTSDIR}/bud/multi-stage-builds-small-as/Dockerfile.index + cmp $root/Dockerfile.mixed ${TESTSDIR}/bud/multi-stage-builds-small-as/Dockerfile.mixed + buildah rm ${cid} + buildah rmi $(buildah --debug=false images -q) + run buildah --debug=false images -q + echo "$output" + [ "$output" = "" ] + [ "$status" -eq 0 ] +} + @test "bud-preserve-subvolumes" { # This Dockerfile needs us to be able to handle a working RUN instruction. if ! which runc ; then @@ -1072,3 +1116,22 @@ load helpers tenminutes=$(( 10 * 60 * $second )) [ "$output" = '["CMD-SHELL" "curl -f http://localhost/ || exit 1"]'" $tenminutes $fiveminutes $threeseconds 4" ] } + +@test "bud with unused build arg" { + target=busybox-image + out=$(buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} --build-arg foo=bar --build-arg foo2=bar2 -f ${TESTSDIR}/bud/build-arg ${TESTSDIR}/bud/build-arg | grep "Warning" | wc -l) + [ "$out" -ne 0 ] +} + +@test "bud with copy-from in Dockerfile no prior FROM" { + target=php-image + run buildah --debug=false bud --signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/copy-from ${TESTSDIR}/bud/copy-from + echo "$output" + [ "$status" -eq 0 ] + + ctr=$(buildah --debug=false from --signature-policy ${TESTSDIR}/policy.json ${target}) + mnt=$(buildah --debug=false mount ${ctr}) + + run test -e $mnt/usr/local/bin/composer + [ "$status" -eq 0 ] +} diff --git a/vendor/github.com/containers/buildah/tests/bud/build-arg/Dockerfile b/vendor/github.com/containers/buildah/tests/bud/build-arg/Dockerfile new file mode 100644 index 00000000000..4aa37238935 --- /dev/null +++ b/vendor/github.com/containers/buildah/tests/bud/build-arg/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ARG foo diff --git a/vendor/github.com/containers/buildah/tests/bud/copy-from/Dockerfile b/vendor/github.com/containers/buildah/tests/bud/copy-from/Dockerfile new file mode 100644 index 00000000000..39145ae6fb4 --- /dev/null +++ b/vendor/github.com/containers/buildah/tests/bud/copy-from/Dockerfile @@ -0,0 +1,2 @@ +FROM php:7.2 +COPY --from=composer:latest /usr/bin/composer /usr/local/bin/composer diff --git a/vendor/github.com/containers/buildah/tests/bud/from-as/Dockerfile.skip b/vendor/github.com/containers/buildah/tests/bud/from-as/Dockerfile.skip index b28b1705f20..bc0ba158d00 100644 --- a/vendor/github.com/containers/buildah/tests/bud/from-as/Dockerfile.skip +++ b/vendor/github.com/containers/buildah/tests/bud/from-as/Dockerfile.skip @@ -1,4 +1,4 @@ -FROM centos:7 AS base +FROM registry.centos.org/centos/centos:centos7 AS base RUN touch /1 ENV LOCAL=/1 RUN find $LOCAL diff --git a/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.index b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.index new file mode 100644 index 00000000000..f3ee8c7a6a1 --- /dev/null +++ b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.index @@ -0,0 +1,5 @@ +FROM scratch +COPY Dockerfile.index / + +FROM alpine +COPY --from=0 /Dockerfile.index /Dockerfile.index diff --git a/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.mixed 
b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.mixed new file mode 100644 index 00000000000..9323c23b35a --- /dev/null +++ b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.mixed @@ -0,0 +1,13 @@ +FROM scratch as myname +COPY Dockerfile.name / + +FROM scratch as myname2 +COPY Dockerfile.index / + +FROM scratch +COPY Dockerfile.mixed / + +FROM scratch +COPY --from=myname /Dockerfile.name /Dockerfile.name +COPY --from=1 /Dockerfile.index /Dockerfile.index +COPY --from=2 /Dockerfile.mixed /Dockerfile.mixed diff --git a/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.name b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.name new file mode 100644 index 00000000000..8908f5c8291 --- /dev/null +++ b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds-small-as/Dockerfile.name @@ -0,0 +1,5 @@ +FROM alpine as myname +COPY Dockerfile.name / + +FROM scratch +COPY --from=myname /Dockerfile.name /Dockerfile.name diff --git a/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.mixed b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.mixed index 9323c23b35a..b436a509367 100644 --- a/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.mixed +++ b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.mixed @@ -1,7 +1,7 @@ -FROM scratch as myname +FROM scratch AS myname COPY Dockerfile.name / -FROM scratch as myname2 +FROM scratch AS myname2 COPY Dockerfile.index / FROM scratch diff --git a/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.name b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.name index 8908f5c8291..ab22ce9ebc4 100644 --- a/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.name +++ b/vendor/github.com/containers/buildah/tests/bud/multi-stage-builds/Dockerfile.name @@ -1,4 +1,4 @@ -FROM alpine as myname +FROM alpine AS myname COPY Dockerfile.name / FROM scratch diff --git a/vendor/github.com/containers/buildah/tests/conformance/bud_test.go b/vendor/github.com/containers/buildah/tests/conformance/bud_test.go index f4d53dc705d..b856f50256f 100644 --- a/vendor/github.com/containers/buildah/tests/conformance/bud_test.go +++ b/vendor/github.com/containers/buildah/tests/conformance/bud_test.go @@ -161,10 +161,11 @@ var _ = Describe("Buildah build conformance test", func() { //Check fs with container-diff fscheck := SystemExec("container-diff", []string{"diff", "daemon://buildahimage", "daemon://dockerimage", "--type=file"}) fscheck.WaitWithDefaultTimeout() - fsr := regexp.MustCompile("These entries.*?None") - Expect(len(fsr.FindAllStringSubmatch(fscheck.OutputToString(), -1))).To(Equal(3), + fsr := regexp.MustCompile("/[a-zA-Z0-9/.\\-_]+[ ]+[0-9]+") + fsrignore := regexp.MustCompile("/[a-zA-Z0-9/.\\-_]+[ ]+0") + Expect(len(fsr.FindAllString(fscheck.OutputToString(), + -1))).To(Equal(len(fsrignore.FindAllString(fscheck.OutputToString(), -1))), strings.Replace(errMsg, "FAILEDREASON", "Files inside image is different.", -1)) - } }, diff --git a/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.reusebase b/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.reusebase index 2dac62f5705..ef90ba433dc 100644 --- a/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.reusebase +++ 
b/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.reusebase @@ -1,4 +1,4 @@ -FROM centos:7 AS base +FROM registry.centos.org/centos/centos:centos7 AS base RUN touch /1 ENV LOCAL=/1 diff --git a/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.shell b/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.shell index 514c7c9d170..1c0651c8773 100644 --- a/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.shell +++ b/vendor/github.com/containers/buildah/tests/conformance/testdata/Dockerfile.shell @@ -1,3 +1,3 @@ -FROM centos:7 +FROM registry.centos.org/centos/centos:centos7 SHELL ["/bin/bash", "-xc"] RUN env \ No newline at end of file diff --git a/vendor/github.com/containers/buildah/tests/conformance/testdata/copy/Dockerfile b/vendor/github.com/containers/buildah/tests/conformance/testdata/copy/Dockerfile index 815de493ba7..093f4bdbe0b 100644 --- a/vendor/github.com/containers/buildah/tests/conformance/testdata/copy/Dockerfile +++ b/vendor/github.com/containers/buildah/tests/conformance/testdata/copy/Dockerfile @@ -1,3 +1,3 @@ -FROM centos:7 +FROM registry.centos.org/centos/centos:centos7 COPY script /usr/bin RUN ls -al /usr/bin/script \ No newline at end of file diff --git a/vendor/github.com/containers/buildah/tests/conformance/testdata/copydir/Dockerfile b/vendor/github.com/containers/buildah/tests/conformance/testdata/copydir/Dockerfile index 92c53fdf6da..9d225f8c75b 100644 --- a/vendor/github.com/containers/buildah/tests/conformance/testdata/copydir/Dockerfile +++ b/vendor/github.com/containers/buildah/tests/conformance/testdata/copydir/Dockerfile @@ -1,3 +1,3 @@ -FROM centos:7 +FROM registry.centos.org/centos/centos:centos7 COPY dir /dir RUN ls -al /dir/file \ No newline at end of file diff --git a/vendor/github.com/containers/buildah/tests/conformance/testdata/copyrename/Dockerfile b/vendor/github.com/containers/buildah/tests/conformance/testdata/copyrename/Dockerfile index 575bf2cd4de..1aadbe44ac2 100644 --- a/vendor/github.com/containers/buildah/tests/conformance/testdata/copyrename/Dockerfile +++ b/vendor/github.com/containers/buildah/tests/conformance/testdata/copyrename/Dockerfile @@ -1,3 +1,3 @@ -FROM centos:7 +FROM registry.centos.org/centos/centos:centos7 COPY file1 /usr/bin/file2 RUN ls -al /usr/bin/file2 && ! 
ls -al /usr/bin/file1 \ No newline at end of file diff --git a/vendor/github.com/containers/buildah/tests/containers.bats b/vendor/github.com/containers/buildah/tests/containers.bats index da370a7d9d3..d3bcfe01a10 100644 --- a/vendor/github.com/containers/buildah/tests/containers.bats +++ b/vendor/github.com/containers/buildah/tests/containers.bats @@ -32,6 +32,14 @@ load helpers buildah rmi -a -f } +@test "containers json test" { + cid1=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine) + out=$(buildah --debug=false containers --json | grep "{" | wc -l) + [ "$out" -ne "0" ] + buildah rm -a + buildah rmi -a -f +} + @test "containers noheading test" { cid1=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine) cid2=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json busybox) diff --git a/vendor/github.com/containers/buildah/tests/images.bats b/vendor/github.com/containers/buildah/tests/images.bats old mode 100644 new mode 100755 index 76b31a8a3ed..5134a37217c --- a/vendor/github.com/containers/buildah/tests/images.bats +++ b/vendor/github.com/containers/buildah/tests/images.bats @@ -97,6 +97,19 @@ load helpers buildah rmi -a -f } +@test "images json dup test" { + cid=$(buildah from --signature-policy ${TESTSDIR}/policy.json scratch) + buildah commit --signature-policy ${TESTSDIR}/policy.json $cid test + buildah tag test new-name + + run buildah --debug=false images --json + [ $(grep '"id": "' <<< "$output" | wc -l) -eq 1 ] + [ "${status}" -eq 0 ] + + buildah rm -a + buildah rmi -a -f +} + @test "specify an existing image" { cid1=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json alpine) cid2=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json busybox) @@ -113,3 +126,22 @@ load helpers [ $(wc -l <<< "$output") -eq 1 ] [ "${status}" -eq 1 ] } + +@test "Test dangling images" { + cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json scratch) + buildah commit --signature-policy ${TESTSDIR}/policy.json $cid test + buildah commit --signature-policy ${TESTSDIR}/policy.json $cid test + run buildah --debug=false images + [ $(wc -l <<< "$output") -eq 3 ] + [ "${status}" -eq 0 ] + run buildah --debug=false images --filter dangling=true + [[ $output =~ " " ]] + [ $(wc -l <<< "$output") -eq 2 ] + [ "${status}" -eq 0 ] + run buildah --debug=false images --filter dangling=false + [[ $output =~ " latest " ]] + [ $(wc -l <<< "$output") -eq 2 ] + [ "${status}" -eq 0 ] + buildah rm -a + buildah rmi -a -f +} diff --git a/vendor/github.com/containers/buildah/tests/info.bats b/vendor/github.com/containers/buildah/tests/info.bats new file mode 100644 index 00000000000..33f70132de2 --- /dev/null +++ b/vendor/github.com/containers/buildah/tests/info.bats @@ -0,0 +1,11 @@ +#!/usr/bin/env bats + +load helpers + +@test "info" { + out1=$(buildah info | grep "host" | wc -l) + [ "$out1" -ne "0" ] + + out2=$(buildah info --format='{{.store}}') + [ "$out2" != "" ] +} diff --git a/vendor/github.com/containers/buildah/tests/pull.bats b/vendor/github.com/containers/buildah/tests/pull.bats index 7d4b569af01..3c32160da80 100644 --- a/vendor/github.com/containers/buildah/tests/pull.bats +++ b/vendor/github.com/containers/buildah/tests/pull.bats @@ -25,3 +25,157 @@ load helpers echo "$output" [ "$status" -eq 0 ] } + +@test "pull-from-registry" { + run buildah pull --signature-policy ${TESTSDIR}/policy.json busybox:glibc + echo "$output" + [ "$status" -eq 0 ] + run buildah pull --signature-policy ${TESTSDIR}/policy.json busybox + echo 
"$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "busybox:glibc" ]] + [[ "$output" =~ "busybox:latest" ]] + run buildah pull --signature-policy ${TESTSDIR}/policy.json quay.io/libpod/alpine_nginx:latest + echo "$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "alpine_nginx:latest" ]] + run buildah rmi quay.io/libpod/alpine_nginx:latest + echo "$output" + [ "$status" -eq 0 ] + run buildah pull --signature-policy ${TESTSDIR}/policy.json quay.io/libpod/alpine_nginx + echo "$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "alpine_nginx:latest" ]] + run buildah pull --signature-policy ${TESTSDIR}/policy.json alpine@sha256:1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe + echo "$output" + [ "$status" -eq 0 ] + run buildah pull --signature-policy ${TESTSDIR}/policy.json fakeimage/fortest + echo "$output" + [ "$status" -ne 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ ! "$output" =~ "fakeimage/fortest" ]] +} + +@test "pull-from-docerk-archive" { + run buildah pull --signature-policy ${TESTSDIR}/policy.json alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah push --signature-policy ${TESTSDIR}/policy.json docker.io/library/alpine:latest docker-archive:/tmp/alp.tar:alpine:latest + echo "$output" + [ "$status" -eq 0 ] + run buildah rmi alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah pull --signature-policy ${TESTSDIR}/policy.json docker-archive:/tmp/alp.tar + echo "$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "alpine" ]] + run rm -f /tmp/alp.tar + echo "$output" + [ "$status" -eq 0 ] +} + +@test "pull-from-oci-archive" { + run buildah pull --signature-policy ${TESTSDIR}/policy.json alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah push --signature-policy ${TESTSDIR}/policy.json docker.io/library/alpine:latest oci-archive:/tmp/alp.tar:alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah rmi alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah pull --signature-policy ${TESTSDIR}/policy.json oci-archive:/tmp/alp.tar + echo "$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "alpine" ]] + run rm -f /tmp/alp.tar + echo "$output" + [ "$status" -eq 0 ] +} + +@test "pull-from-local-directory" { + run mkdir /tmp/buildahtest + run buildah pull --signature-policy ${TESTSDIR}/policy.json alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah push --signature-policy ${TESTSDIR}/policy.json docker.io/library/alpine:latest dir:/tmp/buildahtest + echo "$output" + [ "$status" -eq 0 ] + run buildah rmi alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah pull --signature-policy ${TESTSDIR}/policy.json dir:/tmp/buildahtest + echo "$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "buildahtest" ]] + run rm -rf /tmp/buildahtest + echo "$output" + [ "$status" -eq 0 ] +} + +@test "pull-from-docker-deamon" { + run systemctl status docker + if [[ ! 
"$output" =~ "active (running)" ]] + then + skip "Skip the test as docker services is not running" + fi + + run systemctl start docker + run docker pull alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah rmi alpine + echo "$output" + run buildah pull --signature-policy ${TESTSDIR}/policy.json docker-daemon:docker.io/alpine:latest + echo "$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "alpine:latest" ]] + run docker rmi -f alpine:latest + echo "$output" + [ "$status" -eq 0 ] +} + +@test "pull-from-ostree" { + run command -v ostree + if [[ $status -ne 0 ]] + then + skip "Skip the test as ostree command is not avaible" + fi + + run mkdir /tmp/ostree-repo + run ostree --repo=/tmp/ostree-repo init + echo "$output" + run buildah pull --signature-policy ${TESTSDIR}/policy.json alpine + echo "$output" + [ "$status" -eq 0 ] + run buildah push --signature-policy ${TESTSDIR}/policy.json alpine ostree:alpine@/tmp/ostree-repo + echo "$output" + [ "$status" -eq 0 ] + run buildah rmi alpine + echo "$output" + run buildah pull --signature-policy ${TESTSDIR}/policy.json ostree:alpine@/tmp/ostree-repo + echo "$output" + [ "$status" -eq 0 ] + run buildah images --format "{{.Name}}:{{.Tag}}" + echo "$output" + [[ "$output" =~ "ostree-repo:latest" ]] + run rm -rf /tmp/ostree-repo + echo "$output" + [ "$status" -eq 0 ] +} diff --git a/vendor/github.com/containers/buildah/tests/push.bats b/vendor/github.com/containers/buildah/tests/push.bats index ee845fc0749..da59b5191d5 100644 --- a/vendor/github.com/containers/buildah/tests/push.bats +++ b/vendor/github.com/containers/buildah/tests/push.bats @@ -26,7 +26,7 @@ load helpers buildah commit -D ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" scratch-image${format:+-${format}} buildah commit -D ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" dir:${TESTDIR}/committed${format:+.${format}} mkdir -p ${TESTDIR}/pushed${format:+.${format}} - buildah push --signature-policy ${TESTSDIR}/policy.json scratch-image${format:+-${format}} dir:${TESTDIR}/pushed${format:+.${format}} + buildah push -D --signature-policy ${TESTSDIR}/policy.json scratch-image${format:+-${format}} dir:${TESTDIR}/pushed${format:+.${format}} # Reencode the manifest to lose variations due to different encoders or definitions of structures. 
imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TESTDIR}/committed${format:+.${format}} > ${TESTDIR}/manifest.committed${format:+.${format}} imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TESTDIR}/pushed${format:+.${format}} > ${TESTDIR}/manifest.pushed${format:+.${format}} diff --git a/vendor/github.com/containers/buildah/tests/test_buildah_authentication.sh b/vendor/github.com/containers/buildah/tests/test_buildah_authentication.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/containers/buildah/tests/test_buildah_baseline.sh b/vendor/github.com/containers/buildah/tests/test_buildah_baseline.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/containers/buildah/tests/test_buildah_rpm.sh b/vendor/github.com/containers/buildah/tests/test_buildah_rpm.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/containers/buildah/tests/validate/git-validation.sh b/vendor/github.com/containers/buildah/tests/validate/git-validation.sh index 8207e780ecc..44fa61e35ba 100755 --- a/vendor/github.com/containers/buildah/tests/validate/git-validation.sh +++ b/vendor/github.com/containers/buildah/tests/validate/git-validation.sh @@ -8,6 +8,6 @@ if ! which git-validation > /dev/null 2> /dev/null ; then fi if test "$TRAVIS" != true ; then #GITVALIDATE_EPOCH=":/git-validation epoch" - GITVALIDATE_EPOCH="5fa3c0bd3fbceca38b6ad2f8a893bc918a8a514a" + GITVALIDATE_EPOCH="87cb532ab33f1f242a56e362e799f95549519d3b" fi exec git-validation -q -run DCO,short-subject ${GITVALIDATE_EPOCH:+-range "${GITVALIDATE_EPOCH}""..${GITVALIDATE_TIP:-@}"} ${GITVALIDATE_FLAGS} diff --git a/vendor/github.com/containers/buildah/unshare/unshare.c b/vendor/github.com/containers/buildah/unshare/unshare.c index 6873e49c245..3865e414f69 100644 --- a/vendor/github.com/containers/buildah/unshare/unshare.c +++ b/vendor/github.com/containers/buildah/unshare/unshare.c @@ -79,7 +79,8 @@ void _buildah_unshare(void) pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe"); if (pidfd != -1) { snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); - if (write(pidfd, buf, strlen(buf)) != strlen(buf)) { + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); _exit(1); } diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 09aa7e1eb9f..5dadec7c230 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -173,31 +173,13 @@ func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) { } } -// isRegistryInsecure checks if the named registry is marked as not secure -func isRegistryInsecure(registry string, sc *types.SystemContext) (bool, error) { - registries, err := sysregistriesv2.GetRegistries(sc) - if err != nil { - return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc)) - } - if reginfo := sysregistriesv2.FindRegistry(registry, registries); reginfo != nil { - if reginfo.Insecure { - logrus.Debugf("registry %q is marked insecure in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc)) - } else { - logrus.Debugf("registry %q is not marked insecure in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc)) - } - return reginfo.Insecure, nil - } - logrus.Debugf("registry %q is not listed in registries configuration %q, assuming it's 
secure", registry, sysregistries.RegistriesConfPath(sc)) - return false, nil -} - // isRegistryBlocked checks if the named registry is marked as blocked func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) { - registries, err := sysregistriesv2.GetRegistries(sc) + reginfo, err := sysregistriesv2.FindRegistry(sc, registry) if err != nil { return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistries.RegistriesConfPath(sc)) } - if reginfo := sysregistriesv2.FindRegistry(registry, registries); reginfo != nil { + if reginfo != nil { if reginfo.Blocked { logrus.Debugf("registry %q is marked as blocked in registries configuration %q", registry, sysregistries.RegistriesConfPath(sc)) } else { @@ -221,11 +203,6 @@ func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, wha return false, nil } -// isReferenceInsecure checks if the registry part of a reference is insecure -func isReferenceInsecure(ref types.ImageReference, sc *types.SystemContext) (bool, error) { - return isReferenceSomething(ref, sc, isRegistryInsecure) -} - // isReferenceBlocked checks if the registry part of a reference is blocked func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool, error) { if ref != nil && ref.Transport() != nil { diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index b2451b78b3b..f4cac522eb8 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -122,12 +122,11 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto // Figure out the list of registries. var registries []string - allRegistries, err := sysregistriesv2.GetRegistries(sc) + searchRegistries, err := sysregistriesv2.FindUnqualifiedSearchRegistries(sc) if err != nil { logrus.Debugf("unable to read configured registries to complete %q: %v", name, err) - registries = []string{} } - for _, registry := range sysregistriesv2.FindUnqualifiedSearchRegistries(allRegistries) { + for _, registry := range searchRegistries { if !registry.Blocked { registries = append(registries, registry.URL) } @@ -396,57 +395,11 @@ func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.Linux // ParseIDMappings parses mapping triples. 
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { - nonDigitsToWhitespace := func(r rune) rune { - if strings.IndexRune("0123456789", r) == -1 { - return ' ' - } else { - return r - } - } - parseTriple := func(spec []string) (container, host, size uint32, err error) { - cid, err := strconv.ParseUint(spec[0], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) - } - hid, err := strconv.ParseUint(spec[1], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) - } - sz, err := strconv.ParseUint(spec[2], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) - } - return uint32(cid), uint32(hid), uint32(sz), nil - } - parseIDMap := func(mapSpec []string, mapSetting string) (idmap []idtools.IDMap, err error) { - for _, idMapSpec := range mapSpec { - idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec)) - if len(idSpec)%3 != 0 { - return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) - } - for i := range idSpec { - if i%3 != 0 { - continue - } - cid, hid, size, err := parseTriple(idSpec[i : i+3]) - if err != nil { - return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) - } - mapping := idtools.IDMap{ - ContainerID: int(cid), - HostID: int(hid), - Size: int(size), - } - idmap = append(idmap, mapping) - } - } - return idmap, nil - } - uid, err := parseIDMap(uidmap, "userns-uid-map") + uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") if err != nil { return nil, nil, err } - gid, err := parseIDMap(gidmap, "userns-gid-map") + gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") if err != nil { return nil, nil, err } diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf index 185cde449ea..ee8192935a5 100644 --- a/vendor/github.com/containers/buildah/vendor.conf +++ b/vendor/github.com/containers/buildah/vendor.conf @@ -3,9 +3,10 @@ github.com/blang/semver master github.com/BurntSushi/toml master github.com/containerd/continuity master github.com/containernetworking/cni v0.7.0-alpha1 -github.com/containers/image de7be82ee3c7fb676bf6cfdc9090be7cc28f404c -github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e -github.com/containers/storage 3161726d1db0d0d4e86a9667dd476f09b997f497 +github.com/containers/image 0c6cc8e1420001ae39fa89d308b3b3bc5ee81c57 +github.com/boltdb/bolt master +github.com/containers/libpod c8eaf59d5f4bec249db8134c6a9fcfbcac792519 +github.com/containers/storage 60a692f7ce891feb91ce0eda87bd06bfd5651dff github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 @@ -31,7 +32,7 @@ github.com/mistifyio/go-zfs master github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c github.com/mtrmac/gpgme master github.com/Nvveen/Gotty master -github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runc master github.com/opencontainers/runtime-spec v1.0.0 @@ -54,10 +55,14 @@ github.com/xeipuuv/gojsonreference master github.com/xeipuuv/gojsonschema master golang.org/x/crypto master 
golang.org/x/net master +golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e golang.org/x/sys master golang.org/x/text master -gopkg.in/cheggaaa/pb.v1 v1.0.13 +gopkg.in/cheggaaa/pb.v1 v1.0.27 gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b k8s.io/apimachinery master k8s.io/client-go master k8s.io/kubernetes master +github.com/klauspost/pgzip v1.2.1 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 diff --git a/vendor/github.com/containers/image/.travis.Dockerfile b/vendor/github.com/containers/image/.travis.Dockerfile index 00ce1dbdd71..70b143408af 100644 --- a/vendor/github.com/containers/image/.travis.Dockerfile +++ b/vendor/github.com/containers/image/.travis.Dockerfile @@ -3,8 +3,8 @@ FROM ubuntu:artful RUN apt-get -qq update && \ apt-get install -y sudo docker.io git make btrfs-tools libdevmapper-dev libgpgme-dev libostree-dev -ADD https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz /tmp +ADD https://storage.googleapis.com/golang/go1.11.1.linux-amd64.tar.gz /tmp -RUN tar -C /usr/local -xzf /tmp/go1.9.2.linux-amd64.tar.gz && \ - rm /tmp/go1.9.2.linux-amd64.tar.gz && \ +RUN tar -C /usr/local -xzf /tmp/go1.11.1.linux-amd64.tar.gz && \ + rm /tmp/go1.11.1.linux-amd64.tar.gz && \ ln -s /usr/local/go/bin/* /usr/local/bin/ diff --git a/vendor/github.com/containers/image/.travis.yml b/vendor/github.com/containers/image/.travis.yml index 5f06dd2f9c2..3c39a0e5f32 100644 --- a/vendor/github.com/containers/image/.travis.yml +++ b/vendor/github.com/containers/image/.travis.yml @@ -21,7 +21,7 @@ script: > -e TRAVIS=$TRAVIS -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG -e TRAVIS_BRANCH=$TRAVIS_BRANCH -e TRAVIS_COMMIT=$TRAVIS_COMMIT - -e GOPATH=/gopath -e TRASH_CACHE=/gopath/.trashcache + -e GOPATH=/gopath -e TRASH_CACHE=/gopath/.trashcache -e HOME=/gopath -v /etc/passwd:/etc/passwd -v /etc/sudoers:/etc/sudoers -v /etc/sudoers.d:/etc/sudoers.d -v /var/run:/var/run:z -v $HOME/gopath:/gopath:Z -w /gopath/src/github.com/containers/image image-test bash -c "PATH=$PATH:/gopath/bin make cross tools .gitvalidation validate test test-skopeo SUDO=sudo BUILDTAGS=\"$BUILDTAGS\"" diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index 313d802b3d5..89c7e580f88 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -2,7 +2,6 @@ package copy import ( "bytes" - "compress/gzip" "context" "fmt" "io" @@ -10,28 +9,38 @@ import ( "reflect" "runtime" "strings" + "sync" "time" "github.com/containers/image/image" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/pkg/compression" "github.com/containers/image/signature" "github.com/containers/image/transports" "github.com/containers/image/types" + "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" pb "gopkg.in/cheggaaa/pb.v1" ) type digestingReader struct { - source io.Reader - digester digest.Digester - expectedDigest digest.Digest - validationFailed bool + source io.Reader + digester digest.Digester + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool } +// maxParallelDownloads is used to limit the maxmimum number of parallel +// downloads. Let's follow Firefox by limiting it to 6. 
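The copy.go hunk above extends digestingReader with a validationSucceeded flag next to validationFailed, so later code can tell "stream fully read and verified" apart from "never reached EOF". A rough, self-contained sketch of that verified-reader idea using github.com/opencontainers/go-digest; the verifyingReader type below is an illustration of the pattern, not the vendored implementation:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/opencontainers/go-digest"
)

// verifyingReader hashes everything it reads and, once EOF is reached,
// records whether the stream matched the expected digest.
type verifyingReader struct {
	source    io.Reader
	digester  digest.Digester
	expected  digest.Digest
	succeeded bool
	failed    bool
}

func newVerifyingReader(src io.Reader, expected digest.Digest) (*verifyingReader, error) {
	if err := expected.Validate(); err != nil {
		return nil, fmt.Errorf("invalid digest %q: %v", expected, err)
	}
	return &verifyingReader{source: src, digester: expected.Algorithm().Digester(), expected: expected}, nil
}

func (r *verifyingReader) Read(p []byte) (int, error) {
	n, err := r.source.Read(p)
	if n > 0 {
		r.digester.Hash().Write(p[:n])
	}
	if err == io.EOF {
		if actual := r.digester.Digest(); actual != r.expected {
			r.failed = true
			return n, fmt.Errorf("digest mismatch: expected %s, got %s", r.expected, actual)
		}
		r.succeeded = true // only set when the whole stream was read and matched
	}
	return n, err
}

func main() {
	payload := "hello blob"
	r, _ := newVerifyingReader(strings.NewReader(payload), digest.FromString(payload))
	io.Copy(ioutil.Discard, r)
	fmt.Println("succeeded:", r.succeeded, "failed:", r.failed)
}
```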
+var maxParallelDownloads = 6 + // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error -// and set validationFailed to true if the source stream does not match expectedDigest. +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. +// (neither is set if EOF is never reached). func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { if err := expectedDigest.Validate(); err != nil { return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) @@ -64,6 +73,7 @@ func (d *digestingReader) Read(p []byte) (int, error) { d.validationFailed = true return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) } + d.validationSucceeded = true } return n, err } @@ -71,21 +81,23 @@ func (d *digestingReader) Read(p []byte) (int, error) { // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - cachedDiffIDs map[digest.Digest]digest.Digest dest types.ImageDestination rawSource types.ImageSource reportWriter io.Writer progressInterval time.Duration progress chan types.ProgressProperties + blobInfoCache types.BlobInfoCache + copyInParallel bool } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + canModifyManifest bool + canSubstituteBlobs bool } // Options allows supplying non-default configuration modifying the behavior of CopyImage. @@ -140,13 +152,18 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } }() + copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() c := &copier{ - cachedDiffIDs: make(map[digest.Digest]digest.Digest), dest: dest, rawSource: rawSource, reportWriter: reportWriter, progressInterval: options.ProgressInterval, progress: options.Progress, + copyInParallel: copyInParallel, + // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually + // we might want to add a separate CommonCtx — or would that be too confusing? + blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), } unparsedToplevel := image.UnparsedInstance(rawSource, nil) @@ -235,6 +252,13 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli src: src, // diffIDsAreNeeded is computed later canModifyManifest: len(sigs) == 0, + // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. 
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "", } if err := ic.updateEmbeddedDockerReference(); err != nil { @@ -365,11 +389,26 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { return nil } +// shortDigest returns the first 12 characters of the digest. +func shortDigest(d digest.Digest) string { + return d.Encoded()[:12] +} + +// createProgressBar creates a pb.ProgressBar. +func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) *pb.ProgressBar { + bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) + bar.SetMaxWidth(80) + bar.ShowTimeLeft = false + bar.ShowPercent = false + bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest))) + bar.Output = writer + return bar +} + // copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos := ic.src.LayerInfos() - destInfos := []types.BlobInfo{} - diffIDs := []digest.Digest{} + numLayers := len(srcInfos) updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) if err != nil { return err @@ -382,30 +421,83 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos = updatedSrcInfos srcInfosUpdated = true } - for _, srcLayer := range srcInfos { - var ( - destInfo types.BlobInfo - diffID digest.Digest - err error - ) + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + copyGroup.Add(numLayers) + // copySemaphore is used to limit the number of parallel downloads to + // avoid malicious images causing troubles and to be nice to servers. + var copySemaphore *semaphore.Weighted + if ic.c.copyInParallel { + copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) + } else { + copySemaphore = semaphore.NewWeighted(int64(1)) + } + + data := make([]copyLayerData, numLayers) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *pb.ProgressBar) { + defer bar.Finish() + defer copySemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. 
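The rewritten copyLayers launches one goroutine per layer and bounds them with a golang.org/x/sync/semaphore.Weighted (weight maxParallelDownloads when both transports are thread-safe, otherwise 1), joining on a sync.WaitGroup. A condensed, self-contained sketch of that bounded fan-out; the layer names and the sleep standing in for the actual copy are made up:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	const maxParallel = 6
	layers := []string{"layer0", "layer1", "layer2", "layer3", "layer4", "layer5", "layer6", "layer7"}

	ctx := context.Background()
	sem := semaphore.NewWeighted(maxParallel)
	results := make([]string, len(layers))

	var wg sync.WaitGroup
	wg.Add(len(layers))
	for i, layer := range layers {
		// Acquire before launching, as copyLayers does, so at most
		// maxParallel copies are in flight at any time.
		if err := sem.Acquire(ctx, 1); err != nil {
			panic(err)
		}
		go func(i int, layer string) {
			defer wg.Done()
			defer sem.Release(1)
			time.Sleep(10 * time.Millisecond) // stand-in for the real layer copy
			results[i] = layer + ": done"
		}(i, layer)
	}
	wg.Wait() // results are collected only after every worker finished

	for _, r := range results {
		fmt.Println(r)
	}
}
```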
if ic.diffIDsAreNeeded { - return errors.New("getting DiffID for foreign layers is unimplemented") + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + bar.Prefix(fmt.Sprintf("Skipping blob %s (DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest))) + bar.Finish() + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest))) + bar.Add64(bar.Total) + bar.Finish() } - destInfo = srcLayer - ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name()) } else { - destInfo, diffID, err = ic.copyLayer(ctx, srcLayer) - if err != nil { - return err - } + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar) } - destInfos = append(destInfos, destInfo) - diffIDs = append(diffIDs, diffID) + data[index] = cld } + + progressBars := make([]*pb.ProgressBar, numLayers) + for i, srcInfo := range srcInfos { + bar := createProgressBar(srcInfo, "blob", nil) + progressBars[i] = bar + } + + progressPool := pb.NewPool(progressBars...) + progressPool.Output = ic.c.reportWriter + if err := progressPool.Start(); err != nil { + return errors.Wrapf(err, "error creating progress-bar pool") + } + + for i, srcLayer := range srcInfos { + copySemaphore.Acquire(ctx, 1) + go copyLayerHelper(i, srcLayer, progressBars[i]) + } + + destInfos := make([]types.BlobInfo, numLayers) + diffIDs := make([]digest.Digest, numLayers) + + copyGroup.Wait() + progressPool.Stop() + + for i, cld := range data { + if cld.err != nil { + return cld.err + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos if ic.diffIDsAreNeeded { ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs @@ -472,12 +564,14 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - c.Printf("Copying config %s\n", srcInfo.Digest) configBlob, err := src.ConfigBlob(ctx) if err != nil { return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) } - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true) + bar := createProgressBar(srcInfo, "config", c.reportWriter) + defer bar.Finish() + bar.Start() + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar) if err != nil { return err } @@ -497,40 +591,33 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { - // Check if we already have a blob with this digest - haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest) - } - // If we already have a cached diffID for this blob, we don't need to compute it - diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "") - // If we already have the blob, and we don't need to 
recompute the diffID, then we might be able to avoid reading it again - if haveBlob && !diffIDIsNeeded { - // Check the blob sizes match, if we were given a size this time - if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize { - return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size) - } - srcInfo.Size = extantBlobSize - // Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob - blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo) +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) { + cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" + diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + + // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. + if !diffIDIsNeeded { + reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest) + return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) + } + if reused { + bar.Prefix(fmt.Sprintf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest))) + bar.Add64(bar.Total) + bar.Finish() + return blobInfo, cachedDiffID, nil } - ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest) - return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err } // Fallback: copy the layer, computing the diffID if we need to do so - ic.c.Printf("Copying blob %s\n", srcInfo.Digest) - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } defer srcStream.Close() blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, - diffIDIsNeeded) + diffIDIsNeeded, bar) if err != nil { return types.BlobInfo{}, "", err } @@ -543,11 +630,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) return blobInfo, diffIDResult.digest, nil } } else { - return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil + return blobInfo, cachedDiffID, nil } } @@ -556,7 +645,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t // perhaps compressing the stream if canCompress, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -580,7 +669,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea return pipeWriter } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -617,14 +706,14 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. // === Input: srcStream // === Process input through digestingReader to validate against the expected digest. // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, // use a separate validation failure indicator. - // Note that we don't use a stronger "validationSucceeded" indicator, because + // Note that for this check we don't use the stronger "validationSucceeded" indicator, because // dest.PutBlob may detect that the layer already exists, in which case we don't // read stream to the end, and validation does not happen. digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) @@ -640,16 +729,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } isCompressed := decompressor != nil - - // === Report progress using a pb.Reader. - bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = c.reportWriter - bar.SetMaxWidth(80) - bar.ShowTimeLeft = false - bar.ShowPercent = false - bar.Start() destStream = bar.NewProxyReader(destStream) - defer bar.Finish() // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. 
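Progress reporting is likewise reorganized: the bars are created up front (createProgressBar, pb.NewPool) and copyBlobFromStream only wraps its input with bar.NewProxyReader instead of building a bar per call. A small stand-alone sketch of that proxy-reader pattern with gopkg.in/cheggaaa/pb.v1; the payload and prefix text are invented:

```go
package main

import (
	"io"
	"io/ioutil"
	"strings"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	payload := strings.Repeat("x", 1<<20) // pretend this is a layer blob

	bar := pb.New(len(payload)).SetUnits(pb.U_BYTES)
	bar.SetMaxWidth(80)
	bar.ShowTimeLeft = false
	bar.ShowPercent = false
	bar.Prefix("Copying blob deadbeef1234:")
	bar.Start()
	defer bar.Finish()

	// Everything read through the proxy reader advances the bar; the consumer
	// (PutBlob in the real code, ioutil.Discard here) needs no changes.
	reader := bar.NewProxyReader(strings.NewReader(payload))
	if _, err := io.Copy(ioutil.Discard, reader); err != nil {
		panic(err)
	}
}
```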
@@ -660,8 +740,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // === Deal with layer compression/decompression if necessary var inputInfo types.BlobInfo + var compressionOperation types.LayerCompression if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { logrus.Debugf("Compressing blob on the fly") + compressionOperation = types.Compress pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() @@ -674,6 +756,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr inputInfo.Size = -1 } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { logrus.Debugf("Blob will be decompressed") + compressionOperation = types.Decompress s, err := decompressor(destStream) if err != nil { return types.BlobInfo{}, err @@ -684,6 +767,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr inputInfo.Size = -1 } else { logrus.Debugf("Using original blob without modification") + compressionOperation = types.PreserveOriginal inputInfo = srcInfo } @@ -699,7 +783,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // === Finally, send the layer stream to dest. - uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig) + uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } @@ -722,6 +806,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) } + if digestingReader.validationSucceeded { + // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: + // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader + // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob + // (because inputInfo.Digest == "", this must have been computed afresh). + switch compressionOperation { + case types.PreserveOriginal: + break // Do nothing, we have only one digest and we might not have even verified it. + case types.Compress: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + case types.Decompress: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + default: + return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + } + } return uploadedInfo, nil } @@ -732,7 +832,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) { dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() }() - zipper := gzip.NewWriter(dest) + zipper := pgzip.NewWriter(dest) defer zipper.Close() _, err = io.Copy(zipper, src) // Sets err to nil, i.e. 
causes dest.Close() diff --git a/vendor/github.com/containers/image/copy/copy_test.go b/vendor/github.com/containers/image/copy/copy_test.go index b98133a88f2..c6760447404 100644 --- a/vendor/github.com/containers/image/copy/copy_test.go +++ b/vendor/github.com/containers/image/copy/copy_test.go @@ -51,6 +51,7 @@ func TestDigestingReaderRead(t *testing.T) { assert.Equal(t, int64(len(c.input)), n, c.digest.String()) assert.Equal(t, c.input, dest.Bytes(), c.digest.String()) assert.False(t, reader.validationFailed, c.digest.String()) + assert.True(t, reader.validationSucceeded, c.digest.String()) } // Modified input for _, c := range cases { @@ -60,7 +61,23 @@ func TestDigestingReaderRead(t *testing.T) { dest := bytes.Buffer{} _, err = io.Copy(&dest, reader) assert.Error(t, err, c.digest.String()) - assert.True(t, reader.validationFailed) + assert.True(t, reader.validationFailed, c.digest.String()) + assert.False(t, reader.validationSucceeded, c.digest.String()) + } + // Truncated input + for _, c := range cases { + source := bytes.NewReader(c.input) + reader, err := newDigestingReader(source, c.digest) + require.NoError(t, err, c.digest.String()) + if len(c.input) != 0 { + dest := bytes.Buffer{} + truncatedLen := int64(len(c.input) - 1) + n, err := io.CopyN(&dest, reader, truncatedLen) + assert.NoError(t, err, c.digest.String()) + assert.Equal(t, truncatedLen, n, c.digest.String()) + } + assert.False(t, reader.validationFailed, c.digest.String()) + assert.False(t, reader.validationSucceeded, c.digest.String()) } } diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index d888931fe67..4b2ab022e24 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -124,13 +124,19 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dirImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { +func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") if err != nil { return types.BlobInfo{}, err @@ -169,27 +175,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp return types.BlobInfo{Digest: computedDigest, Size: size}, nil } -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. 
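compressGoroutine above swaps compress/gzip for github.com/klauspost/pgzip, which keeps the same writer API but compresses blocks on multiple CPUs. A minimal sketch of the drop-in usage; the sample data is arbitrary:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/pgzip"
)

func main() {
	src := strings.NewReader(strings.Repeat("layer data ", 100000))

	var compressed bytes.Buffer
	zipper := pgzip.NewWriter(&compressed) // same call shape as gzip.NewWriter
	if _, err := io.Copy(zipper, src); err != nil {
		panic(err)
	}
	if err := zipper.Close(); err != nil { // flush the final block
		panic(err)
	}
	fmt.Printf("compressed %d bytes down to %d\n", src.Size(), int64(compressed.Len()))
}
```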
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) { +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if info.Digest == "" { - return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) + return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) } blobPath := d.ref.layerPath(info.Digest) finfo, err := os.Stat(blobPath) if err != nil && os.IsNotExist(err) { - return false, -1, nil + return false, types.BlobInfo{}, nil } if err != nil { - return false, -1, err + return false, types.BlobInfo{}, err } - return true, finfo.Size(), nil -} + return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil -func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) { - return info, nil } // PutManifest writes manifest to the destination. diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go index 5e17c37c0e5..59b625397b8 100644 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ b/vendor/github.com/containers/image/directory/directory_src.go @@ -48,8 +48,15 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest return m, manifest.GuessMIMEType(m), err } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dirImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { r, err := os.Open(s.ref.layerPath(info.Digest)) if err != nil { return nil, -1, err diff --git a/vendor/github.com/containers/image/directory/directory_test.go b/vendor/github.com/containers/image/directory/directory_test.go index 647c5c249f1..eb2bdc3c616 100644 --- a/vendor/github.com/containers/image/directory/directory_test.go +++ b/vendor/github.com/containers/image/directory/directory_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -57,13 +58,14 @@ func TestGetPutManifest(t *testing.T) { func TestGetPutBlob(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) + cache := blobinfocache.NewMemoryCache() blob := []byte("test-blob") dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() assert.Equal(t, types.PreserveOriginal, dest.DesiredLayerCompression()) - info, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{Digest: digest.Digest("sha256:digest-test"), Size: int64(9)}, false) + info, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{Digest: digest.Digest("sha256:digest-test"), Size: int64(9)}, cache, false) assert.NoError(t, err) err = dest.Commit(context.Background()) assert.NoError(t, err) @@ -73,7 +75,7 @@ func TestGetPutBlob(t *testing.T) { src, err := ref.NewImageSource(context.Background(), nil) require.NoError(t, err) defer src.Close() - rc, size, err := src.GetBlob(context.Background(), info) + rc, size, err := src.GetBlob(context.Background(), info, cache) assert.NoError(t, err) defer rc.Close() b, err := ioutil.ReadAll(rc) @@ -99,6 +101,7 @@ func TestPutBlobDigestFailure(t *testing.T) { dirRef, ok := ref.(dirReference) require.True(t, ok) blobPath := dirRef.layerPath(blobDigest) + cache := blobinfocache.NewMemoryCache() firstRead := true reader := readerFromFunc(func(p []byte) (int, error) { @@ -120,7 +123,7 @@ func TestPutBlobDigestFailure(t *testing.T) { dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() - _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, false) + _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, cache, false) assert.Error(t, err) assert.Contains(t, digestErrorString, err.Error()) err = dest.Commit(context.Background()) diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/docker/cache.go new file mode 100644 index 00000000000..64ad57a7c6e --- /dev/null +++ b/vendor/github.com/containers/image/docker/cache.go @@ -0,0 +1,23 @@ +package docker + +import ( + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" +) + +// bicTransportScope returns a BICTransportScope appropriate for ref. +func bicTransportScope(ref dockerReference) types.BICTransportScope { + // Blobs can be reused across the whole registry. + return types.BICTransportScope{Opaque: reference.Domain(ref.ref)} +} + +// newBICLocationReference returns a BICLocationReference appropriate for ref. 
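Throughout these hunks the per-copy cachedDiffIDs map is replaced by a types.BlobInfoCache that is threaded into GetBlob, PutBlob and TryReusingBlob (blobinfocache.DefaultCache in the copy code, NewMemoryCache in tests). A short sketch of recording and querying a compressed-to-uncompressed digest pair, assuming the vendored pkg/blobinfocache API used in this patch; the digests are fabricated:

```go
package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := blobinfocache.NewMemoryCache() // satisfies types.BlobInfoCache

	compressed := digest.FromString("pretend this is a gzipped layer")
	uncompressed := digest.FromString("pretend this is the same layer, uncompressed")

	// Recorded only after the layer has been fully read and its DiffID
	// computed, so later copies can reuse the result.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)

	fmt.Println(cache.UncompressedDigest(compressed))                         // the recorded DiffID
	fmt.Println(cache.UncompressedDigest(digest.FromString("unknown")) == "") // unrecorded digests yield ""
}
```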
+func newBICLocationReference(ref dockerReference) types.BICLocationReference { + // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob). + return types.BICLocationReference{Opaque: ref.ref.Name()} +} + +// parseBICLocationReference returns a repository for encoded lr. +func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) { + return reference.ParseNormalizedNamed(lr.Opaque) +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport_test.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport_test.go index e54dc7400b5..4a39b2af0e5 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport_test.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_transport_test.go @@ -50,15 +50,15 @@ func TestParseReference(t *testing.T) { // testParseReference is a test shared for Transport.ParseReference and ParseReference. func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { for _, c := range []struct{ input, expectedID, expectedRef string }{ - {sha256digest, sha256digest, ""}, // Valid digest format - {"sha512:" + sha256digestHex + sha256digestHex, "", ""}, // Non-digest.Canonical digest - {"sha256:ab", "", ""}, // Invalid digest value (too short) - {sha256digest + "ab", "", ""}, // Invalid digest value (too long) + {sha256digest, sha256digest, ""}, // Valid digest format + {"sha512:" + sha256digestHex + sha256digestHex, "", ""}, // Non-digest.Canonical digest + {"sha256:ab", "", ""}, // Invalid digest value (too short) + {sha256digest + "ab", "", ""}, // Invalid digest value (too long) {"sha256:XX23456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "", ""}, // Invalid digest value - {"UPPERCASEISINVALID", "", ""}, // Invalid reference input - {"busybox", "", ""}, // Missing tag or digest - {"busybox:latest", "", "docker.io/library/busybox:latest"}, // Explicit tag - {"busybox@" + sha256digest, "", "docker.io/library/busybox@" + sha256digest}, // Explicit digest + {"UPPERCASEISINVALID", "", ""}, // Invalid reference input + {"busybox", "", ""}, // Missing tag or digest + {"busybox:latest", "", "docker.io/library/busybox:latest"}, // Explicit tag + {"busybox@" + sha256digest, "", "docker.io/library/busybox@" + sha256digest}, // Explicit digest // A github.com/distribution/reference value can have a tag and a digest at the same time! // Most versions of docker/reference do not handle that (ignoring the tag), so we reject such input. 
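The new docker/cache.go helpers scope cached blob locations by registry domain (bicTransportScope) and by repository name without tag or digest (newBICLocationReference). A small sketch of how a normalized reference decomposes for that purpose, using the vendored docker/reference package; the image name is just an example:

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("quay.io/example/builder:latest")
	if err != nil {
		panic(err)
	}
	// Domain() is what bicTransportScope keys on: blobs can be reused anywhere
	// within one registry. Name() (domain plus path, no tag) is what
	// newBICLocationReference records, since a tag is not needed to reuse a blob.
	fmt.Println("domain:", reference.Domain(ref)) // quay.io
	fmt.Println("path:  ", reference.Path(ref))   // example/builder
	fmt.Println("name:  ", ref.Name())            // quay.io/example/builder
}
```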
{"busybox:latest@" + sha256digest, "", ""}, // Both tag and digest diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 6d2c5b670d5..23d2ac70fdb 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -13,10 +13,12 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" "github.com/containers/image/docker/reference" "github.com/containers/image/pkg/docker/config" + "github.com/containers/image/pkg/sysregistriesv2" "github.com/containers/image/pkg/tlsclientconfig" "github.com/containers/image/types" "github.com/docker/distribution/registry/client" @@ -69,30 +71,39 @@ type extensionSignatureList struct { } type bearerToken struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + expirationTime time.Time } // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. - sys *types.SystemContext - registry string + sys *types.SystemContext + registry string + client *http.Client + insecureSkipTLSVerify bool + + // The following members are not set by newDockerClient and must be set by callers if needed. username string password string - client *http.Client signatureBase signatureStorageBase scope authScope + extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo) // The following members are detected registry properties: // They are set after a successful detectProperties(), and never change afterwards. scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. challenges []challenge supportsSignatures bool - // The following members are private state for setupRequestAuth, both are valid if token != nil. - token *bearerToken - tokenExpiration time.Time + + // Private state for setupRequestAuth (key: string, value: bearerToken) + tokenCache sync.Map + // detectPropertiesError caches the initial error. + detectPropertiesError error + // detectPropertiesOnce is used to execuute detectProperties() at most once in in makeRequest(). 
+ detectPropertiesOnce sync.Once } type authScope struct { @@ -128,6 +139,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { if token.IssuedAt.IsZero() { token.IssuedAt = time.Now().UTC() } + token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) return token, nil } @@ -194,13 +206,26 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write if err != nil { return nil, err } - remoteName := reference.Path(ref.ref) - return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName) + client, err := newDockerClient(sys, registry, ref.ref.Name()) + if err != nil { + return nil, err + } + client.username = username + client.password = password + client.signatureBase = sigBase + client.scope.actions = actions + client.scope.remoteName = reference.Path(ref.ref) + return client, nil } -// newDockerClientWithDetails returns a new dockerClient instance for the given parameters -func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) { +// newDockerClient returns a new dockerClient instance for the given registry +// and reference. The reference is used to query the registry configuration +// and can either be a registry (e.g, "registry.com[:5000]"), a repository +// (e.g., "registry.com[:5000][/some/namespace]/repo"). +// Please note that newDockerClient does not set all members of dockerClient +// (e.g., username and password); those must be set by callers if necessary. +func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) { hostName := registry if registry == dockerHostname { registry = dockerRegistry @@ -221,33 +246,43 @@ func newDockerClientWithDetails(sys *types.SystemContext, registry, username, pa return nil, err } - if sys != nil && sys.DockerInsecureSkipTLSVerify { - tr.TLSClientConfig.InsecureSkipVerify = true + // Check if TLS verification shall be skipped (default=false) which can + // either be specified in the sysregistriesv2 configuration or via the + // SystemContext, whereas the SystemContext is prioritized. + skipVerify := false + if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { + // Only use the SystemContext if the actual value is defined. 
+ skipVerify = sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue + } else { + reg, err := sysregistriesv2.FindRegistry(sys, reference) + if err != nil { + return nil, errors.Wrapf(err, "error loading registries") + } + if reg != nil { + skipVerify = reg.Insecure + } } + tr.TLSClientConfig.InsecureSkipVerify = skipVerify return &dockerClient{ - sys: sys, - registry: registry, - username: username, - password: password, - client: &http.Client{Transport: tr}, - signatureBase: sigBase, - scope: authScope{ - actions: actions, - remoteName: remoteName, - }, + sys: sys, + registry: registry, + client: &http.Client{Transport: tr}, + insecureSkipTLSVerify: skipVerify, }, nil } // CheckAuth validates the credentials by attempting to log into the registry // returns an error if an error occcured while making the http request or the status code received was 401 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { - newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") + client, err := newDockerClient(sys, registry, registry) if err != nil { return errors.Wrapf(err, "error creating new docker client") } + client.username = username + client.password = password - resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth) + resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth) if err != nil { return err } @@ -299,16 +334,21 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima return nil, errors.Wrapf(err, "error getting username and password") } - // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail - // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results + // The /v2/_catalog endpoint has been disabled for docker.io therefore + // the call made to that endpoint will fail. So using the v1 hostname + // for docker.io for simplicity of implementation and the fact that it + // returns search results. + hostname := registry if registry == dockerHostname { - registry = dockerV1Hostname + hostname = dockerV1Hostname } - client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") + client, err := newDockerClient(sys, hostname, registry) if err != nil { return nil, errors.Wrapf(err, "error creating new docker client") } + client.username = username + client.password = password // Only try the v1 search endpoint if the search query is not empty. If it is // empty skip to the v2 endpoint. @@ -432,24 +472,27 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error { req.SetBasicAuth(c.username, c.password) return nil case "bearer": - if c.token == nil || time.Now().After(c.tokenExpiration) { - realm, ok := challenge.Parameters["realm"] - if !ok { - return errors.Errorf("missing realm in bearer auth challenge") - } - service, _ := challenge.Parameters["service"] // Will be "" if not present - var scope string - if c.scope.remoteName != "" && c.scope.actions != "" { - scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) - } - token, err := c.getBearerToken(req.Context(), realm, service, scope) + cacheKey := "" + scopes := []authScope{c.scope} + if c.extraScope != nil { + // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). 
+ cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions) + scopes = append(scopes, *c.extraScope) + } + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) + } + if !inCache || time.Now().After(token.expirationTime) { + t, err := c.getBearerToken(req.Context(), challenge, scopes) if err != nil { return err } - c.token = token - c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) + token = *t + c.tokenCache.Store(cacheKey, token) } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) @@ -459,7 +502,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error { return nil } -func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) { +func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.Errorf("missing realm in bearer auth challenge") + } + authReq, err := http.NewRequest("GET", realm, nil) if err != nil { return nil, err @@ -469,11 +517,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope if c.username != "" { getParams.Add("account", c.username) } - if service != "" { + if service, ok := challenge.Parameters["service"]; ok && service != "" { getParams.Add("service", service) } - if scope != "" { - getParams.Add("scope", scope) + for _, scope := range scopes { + if scope.remoteName != "" && scope.actions != "" { + getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + } } authReq.URL.RawQuery = getParams.Encode() if c.username != "" && c.password != "" { @@ -505,9 +555,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope return newBearerTokenFromJSONBlob(tokenBlob) } -// detectProperties detects various properties of the registry. -// See the dockerClient documentation for members which are affected by this. -func (c *dockerClient) detectProperties(ctx context.Context) error { +// detectPropertiesHelper performs the work of detectProperties which executes +// it at most once. +func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { if c.scheme != "" { return nil } @@ -530,7 +580,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error { return nil } err := ping("https") - if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { + if err != nil && c.insecureSkipTLSVerify { err = ping("http") } if err != nil { @@ -554,7 +604,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error { return true } isV1 := pingV1("https") - if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify { + if !isV1 && c.insecureSkipTLSVerify { isV1 = pingV1("http") } if isV1 { @@ -564,6 +614,13 @@ func (c *dockerClient) detectProperties(ctx context.Context) error { return err } +// detectProperties detects various properties of the registry. +// See the dockerClient documentation for members which are affected by this. 
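Registry probing is split into detectPropertiesHelper plus a sync.Once wrapper (shown just below) that caches both the fact that probing ran and the error it produced, so concurrent callers never ping twice. A minimal stand-alone sketch of that error-caching Once pattern; probeRegistry is an illustrative stand-in for the real ping:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type client struct {
	detectOnce sync.Once
	detectErr  error
	probes     int
}

// probeRegistry stands in for the expensive scheme/version detection.
func (c *client) probeRegistry() error {
	c.probes++
	return errors.New("registry unreachable")
}

// detectProperties runs the probe at most once; every caller sees the same result.
func (c *client) detectProperties() error {
	c.detectOnce.Do(func() { c.detectErr = c.probeRegistry() })
	return c.detectErr
}

func main() {
	c := &client{}
	fmt.Println(c.detectProperties()) // registry unreachable
	fmt.Println(c.detectProperties()) // same cached error, no second probe
	fmt.Println("probes:", c.probes)  // 1
}
```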
+func (c *dockerClient) detectProperties(ctx context.Context) error { + c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) + return c.detectPropertiesError +} + // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures. func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 9bbffef9394..973d160d0ae 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -15,6 +15,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" @@ -110,20 +111,29 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) { return len(p), nil } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dockerImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { +func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { if inputInfo.Digest.String() != "" { - haveBlob, size, err := d.HasBlob(ctx, inputInfo) + // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. + // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. + // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. 
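The refactored destination funnels its existence checks through blobExists (defined in the hunk below): a HEAD request against the blob endpoint of the target repository. A rough net/http sketch of that check, without the authentication, scheme detection and error decoding the real makeRequest path provides; the registry, repository and digest are made up, and the /v2/<name>/blobs/<digest> path is assumed from the standard registry V2 layout rather than taken from this patch:

```go
package main

import (
	"fmt"
	"net/http"
)

// blobExists reports whether the repository already has the blob, and its size.
func blobExists(registry, repo, dgst string) (bool, int64, error) {
	url := fmt.Sprintf("https://%s/v2/%s/blobs/%s", registry, repo, dgst)
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return false, -1, err
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, -1, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case http.StatusOK:
		return true, res.ContentLength, nil // size comes from Content-Length
	case http.StatusNotFound:
		return false, -1, nil
	default:
		return false, -1, fmt.Errorf("unexpected status %s", res.Status)
	}
}

func main() {
	exists, size, err := blobExists("registry.example.com", "team/app",
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	fmt.Println(exists, size, err)
}
```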
+ haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false) if err != nil { return types.BlobInfo{}, err } if haveBlob { - return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil + return reusedInfo, nil } } @@ -160,7 +170,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") } - // FIXME: DELETE uploadLocation on failure + // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) locationQuery := uploadLocation.Query() // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 @@ -177,19 +187,15 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, } logrus.Debugf("Upload of layer %s complete", computedDigest) + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil } -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); +// blobExists returns true iff repo contains a blob with digest, and if so, also its size. +// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. -func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) { - if info.Digest == "" { - return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) - } - checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String()) - +func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) { + checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) logrus.Debugf("Checking %s", checkPath) res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth) if err != nil { @@ -202,7 +208,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf return true, getBlobSize(res), nil case http.StatusUnauthorized: logrus.Debugf("... not authorized") - return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name()) + return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name()) case http.StatusNotFound: logrus.Debugf("... not present") return false, -1, nil @@ -211,8 +217,134 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf } } -func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) { - return info, nil +// mountBlob tries to mount blob srcDigest from srcRepo to the current destination. 
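mountBlob (below) asks the registry to link an existing blob from another repository instead of re-uploading it: a POST to the upload endpoint with mount/from query parameters, where 201 Created means success and 202 Accepted means the registry ignored the mount and opened an ordinary upload session. A hedged sketch of building that request with net/http, again without the real client's auth plumbing; the names are invented and the /v2/<name>/blobs/uploads/ path is assumed from the standard V2 API:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// mountBlob asks the registry to link srcRepo's blob into destRepo.
func mountBlob(registry, destRepo, srcRepo, dgst string) error {
	u := url.URL{
		Scheme: "https",
		Host:   registry,
		Path:   fmt.Sprintf("/v2/%s/blobs/uploads/", destRepo),
		RawQuery: url.Values{
			"mount": {dgst},
			"from":  {srcRepo},
		}.Encode(),
	}
	res, err := http.Post(u.String(), "", nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case http.StatusCreated:
		return nil // blob mounted, nothing to upload
	case http.StatusAccepted:
		// The registry fell back to a normal upload session; the vendored code
		// tries to cancel it and reports the mount attempt as failed.
		return fmt.Errorf("mount of %s from %s ignored, upload session started", dgst, srcRepo)
	default:
		return fmt.Errorf("unexpected status %s", res.Status)
	}
}

func main() {
	err := mountBlob("registry.example.com", "team/app", "team/base",
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	fmt.Println(err)
}
```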
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error { + u := url.URL{ + Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)), + RawQuery: url.Values{ + "mount": {srcDigest.String()}, + "from": {reference.Path(srcRepo)}, + }.Encode(), + } + mountPath := u.String() + logrus.Debugf("Trying to mount %s", mountPath) + res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth) + if err != nil { + return err + } + defer res.Body.Close() + switch res.StatusCode { + case http.StatusCreated: + logrus.Debugf("... mount OK") + return nil + case http.StatusAccepted: + // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process. + // Abort, and let the ultimate caller do an upload when its ready, instead. + // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested. + uploadLocation, err := res.Location() + if err != nil { + return errors.Wrap(err, "Error determining upload URL after a mount attempt") + } + logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String()) + res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth) + if err != nil { + logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) + } else { + defer res2.Body.Close() + if res2.StatusCode != http.StatusNoContent { + logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode)) + } + } + // Anyway, if canceling the upload fails, ignore it and return the more important error: + return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name()) + default: + logrus.Debugf("Error mounting, response %#v", *res) + return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name()) + } +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if info.Digest == "" { + return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) + } + + // First, check whether the blob happens to already exist at the destination. 
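Editorial note: the cross-repository mount that mountBlob (above) attempts is just a POST to the upload endpoint with `mount=` and `from=` query parameters; 201 Created means the registry linked the existing blob, 202 Accepted means it ignored the hint and started an ordinary upload instead. A minimal sketch of building that URL (repository names and digest are placeholders; endpoint shape per the Docker registry HTTP API v2):

```go
package main

import (
	"fmt"
	"net/url"
)

// mountURL builds the "cross-repo mount" upload URL for destRepo, reusing a blob from srcRepo.
func mountURL(destRepo, srcRepo, digest string) string {
	u := url.URL{
		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", destRepo),
		RawQuery: url.Values{
			"mount": {digest},
			"from":  {srcRepo},
		}.Encode(),
	}
	return u.String()
}

func main() {
	// POST this path against the registry; 201 Created = mounted, 202 Accepted = ordinary upload started.
	fmt.Println(mountURL("myorg/app", "myorg/base",
		"sha256:0000000000000000000000000000000000000000000000000000000000000000"))
}
```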
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest) + if err != nil { + return false, types.BlobInfo{}, err + } + if exists { + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) + return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil + } + + // Then try reusing blobs from other locations. + + // Checking candidateRepo, and mounting from it, requires an expanded token scope. + // We still want to reuse the ping information and other aspects of the client, so rather than make a fresh copy, there is this a bit ugly extraScope hack. + if d.c.extraScope != nil { + return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob") + } + defer func() { + d.c.extraScope = nil + }() + for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) { + candidateRepo, err := parseBICLocationReference(candidate.Location) + if err != nil { + logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) + continue + } + logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name()) + + // Sanity checks: + if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { + logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) + continue + } + if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { + logrus.Debug("... Already tried the primary destination") + continue + } + + // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway. + d.c.extraScope = &authScope{ + remoteName: reference.Path(candidateRepo), + actions: "pull", + } + // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead. + // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel. + // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure. + // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly. + // Even worse, docker/distribution does not actually reasonably implement canceling uploads + // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask); + // so, be a nice client and don't create unnecesary upload sessions on the server. + exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest) + if err != nil { + logrus.Debugf("... Failed: %v", err) + continue + } + if !exists { + // FIXME? Should we drop the blob from cache here (and elsewhere?)? + continue // logrus.Debug() already happened in blobExists + } + if candidateRepo.Name() != d.ref.ref.Name() { + if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil { + logrus.Debugf("... 
Mount failed: %v", err) + continue + } + } + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) + return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil + } + + return false, types.BlobInfo{}, nil } // PutManifest writes manifest to the destination. diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go index aedab973183..c88ff2f3462 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/docker/docker_image_src.go @@ -161,8 +161,15 @@ func getBlobSize(resp *http.Response) int64 { return size } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dockerImageSource) HasThreadSafeGetBlob() bool { + return true +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if len(info.URLs) != 0 { return s.getExternalBlob(ctx, info.URLs) } @@ -177,6 +184,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i // print url also return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) } + cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref)) return res.Body, getBlobSize(res), nil } diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index d6510ccf151..5f30eddbc72 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -82,13 +82,19 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool { return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *Destination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
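Editorial note: HasThreadSafeGetBlob returning true for the docker source (and false for most other transports in this diff) exists so that a copy pipeline can decide whether to pull layers concurrently. A rough sketch of how a caller might act on such a flag, using a buffered-channel semaphore — the real copy code is more elaborate, and fetchLayer here is a stand-in for a GetBlob call:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// fetchLayer stands in for a GetBlob call against a source transport.
func fetchLayer(name string) {
	time.Sleep(10 * time.Millisecond)
	fmt.Println("fetched", name)
}

// fetchAll downloads layers, in parallel only if the source declared GetBlob thread-safe.
func fetchAll(layers []string, threadSafe bool) {
	maxWorkers := 1
	if threadSafe {
		maxWorkers = 4
	}
	sem := make(chan struct{}, maxWorkers)
	var wg sync.WaitGroup
	for _, l := range layers {
		wg.Add(1)
		sem <- struct{}{} // acquire a worker slot before starting the goroutine
		go func(l string) {
			defer wg.Done()
			defer func() { <-sem }()
			fetchLayer(l)
		}(l)
	}
	wg.Wait()
}

func main() {
	fetchAll([]string{"layer1", "layer2", "layer3"}, true)
}
```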
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { +func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { // Ouch, we need to stream the blob into a temporary file just to determine the size. // When the layer is decompressed, we also have to generate the digest on uncompressed datas. if inputInfo.Size == -1 || inputInfo.Digest.String() == "" { @@ -120,12 +126,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t } // Maybe the blob has been already sent - ok, size, err := d.HasBlob(ctx, inputInfo) + ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false) if err != nil { return types.BlobInfo{}, err } if ok { - return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil + return reusedInfo, nil } if isConfig { @@ -151,29 +157,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil } -// HasBlob returns true iff the image destination already contains a blob with -// the matching digest which can be reapplied using ReapplyBlob. Unlike -// PutBlob, the digest can not be empty. If HasBlob returns true, the size of -// the blob must also be returned. If the destination does not contain the -// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it -// returns a non-nil error only on an unexpected failure. -func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) { +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if info.Digest == "" { - return false, -1, errors.Errorf("Can not check for a blob with unknown digest") + return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest") } if blob, ok := d.blobs[info.Digest]; ok { - return true, blob.Size, nil + return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil } - return false, -1, nil -} - -// ReapplyBlob informs the image destination that a blob for which HasBlob -// previously returned true would have been passed to PutBlob if it had -// returned false. Like HasBlob and unlike PutBlob, the digest can not be -// empty. If the blob is a filesystem layer, this signifies that the changes -// it describes need to be applied again when composing a filesystem tree. 
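Editorial note: as the comment above says, when the incoming BlobInfo has no size or digest the tarfile destination has to spool the stream to a temporary file just to measure it. The core of that trick is an io.Copy through an io.TeeReader into a hash; a stripped-down, standard-library-only sketch (not the vendored implementation, which also deals with decompressed digests):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

// spool copies stream to a temporary file while computing its sha256 digest and size.
func spool(stream io.Reader) (path string, dgst string, size int64, err error) {
	f, err := os.CreateTemp("", "blob-")
	if err != nil {
		return "", "", -1, err
	}
	defer f.Close()
	h := sha256.New()
	size, err = io.Copy(f, io.TeeReader(stream, h)) // every byte written to disk also feeds the hash
	if err != nil {
		os.Remove(f.Name())
		return "", "", -1, err
	}
	return f.Name(), fmt.Sprintf("sha256:%x", h.Sum(nil)), size, nil
}

func main() {
	path, dgst, size, err := spool(strings.NewReader("example layer contents"))
	if err != nil {
		panic(err)
	}
	defer os.Remove(path)
	fmt.Println(path, dgst, size)
}
```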
-func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) { - return info, nil + return false, types.BlobInfo{}, nil } func (d *Destination) createRepositoriesFile(rootLayerID string) error { diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index 942893a8125..889e5f8e8d9 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -397,8 +397,15 @@ func (r uncompressedReadCloser) Close() error { return res } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *Source) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if err := s.ensureCachedDataIsPresent(); err != nil { return nil, 0, err } diff --git a/vendor/github.com/containers/image/image/docker_schema1_test.go b/vendor/github.com/containers/image/image/docker_schema1_test.go index b26fa6da629..a7deb2dc814 100644 --- a/vendor/github.com/containers/image/image/docker_schema1_test.go +++ b/vendor/github.com/containers/image/image/docker_schema1_test.go @@ -261,12 +261,12 @@ func TestManifestSchema1Inspect(t *testing.T) { "release": "20180124.1", "summary": "Red Hat OpenStack Platform 12.0 nova-api", "tripleo-common_version": "7.6.3-23-g4891cfe", - "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1", - "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98", - "vcs-type": "git", - "vendor": "Red Hat, Inc.", - "version": "12.0", - "version-release": "12.0-20180124.1", + "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1", + "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98", + "vcs-type": "git", + "vendor": "Red Hat, Inc.", + "version": "12.0", + "version-release": "12.0-20180124.1", }, Architecture: "amd64", Os: "linux", @@ -288,7 +288,7 @@ func TestManifestSchema1UpdatedImageNeedsLayerDiffIDs(t *testing.T) { manifestSchema1FromComponentsLikeFixture(t), } { for mt, expected := range map[string]bool{ - "": false, + "": false, manifest.DockerV2Schema1MediaType: false, manifest.DockerV2Schema1SignedMediaType: false, manifest.DockerV2Schema2MediaType: true, diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index b639ab714f5..cee60f824c1 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) 
([]byte, error) { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor)) + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache) if err != nil { return nil, err } @@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ if historyEntry.EmptyLayer { if !haveGzippedEmptyLayer { logrus.Debugf("Uploading empty layer during conversion to schema 1") - info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false) + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false) if err != nil { return nil, errors.Wrap(err, "Error uploading empty layer") } diff --git a/vendor/github.com/containers/image/image/docker_schema2_test.go b/vendor/github.com/containers/image/image/docker_schema2_test.go index b0f1b1db06a..9d3f96fc6dd 100644 --- a/vendor/github.com/containers/image/image/docker_schema2_test.go +++ b/vendor/github.com/containers/image/image/docker_schema2_test.go @@ -32,7 +32,10 @@ func (f unusedImageSource) Close() error { func (f unusedImageSource) GetManifest(context.Context, *digest.Digest) ([]byte, string, error) { panic("Unexpected call to a mock function") } -func (f unusedImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +func (f unusedImageSource) HasThreadSafeGetBlob() bool { + panic("Unexpected call to a mock function") +} +func (f unusedImageSource) GetBlob(context.Context, types.BlobInfo, types.BlobInfoCache) (io.ReadCloser, int64, error) { panic("Unexpected call to a mock function") } func (f unusedImageSource) GetSignatures(context.Context, *digest.Digest) ([][]byte, error) { @@ -152,7 +155,7 @@ type configBlobImageSource struct { f func(digest digest.Digest) (io.ReadCloser, int64, error) } -func (f configBlobImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +func (f configBlobImageSource) GetBlob(ctx context.Context, info types.BlobInfo, _ types.BlobInfoCache) (io.ReadCloser, int64, error) { if info.Digest.String() != "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" { panic("Unexpected digest in GetBlob") } @@ -395,7 +398,10 @@ func (d *memoryImageDest) MustMatchRuntimeOS() bool { func (d *memoryImageDest) IgnoresEmbeddedDockerReference() bool { panic("Unexpected call to a mock function") } -func (d *memoryImageDest) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { +func (d *memoryImageDest) HasThreadSafePutBlob() bool { + panic("Unexpected call to a mock function") +} +func (d *memoryImageDest) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { if d.storedBlobs == nil { d.storedBlobs = make(map[digest.Digest][]byte) } @@ -409,10 +415,7 @@ func (d *memoryImageDest) PutBlob(ctx 
context.Context, stream io.Reader, inputIn d.storedBlobs[inputInfo.Digest] = contents return types.BlobInfo{Digest: inputInfo.Digest, Size: int64(len(contents))}, nil } -func (d *memoryImageDest) HasBlob(ctx context.Context, inputInfo types.BlobInfo) (bool, int64, error) { - panic("Unexpected call to a mock function") -} -func (d *memoryImageDest) ReapplyBlob(ctx context.Context, inputInfo types.BlobInfo) (types.BlobInfo, error) { +func (d *memoryImageDest) TryReusingBlob(context.Context, types.BlobInfo, types.BlobInfoCache, bool) (bool, types.BlobInfo, error) { panic("Unexpected call to a mock function") } func (d *memoryImageDest) PutManifest(ctx context.Context, m []byte) error { diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 298db360dcd..6fe2a9a3279 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -7,6 +7,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config)) + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/internal/testing/mocks/imagetransport.go b/vendor/github.com/containers/image/internal/testing/mocks/imagetransport.go new file mode 100644 index 00000000000..27c11f5e911 --- /dev/null +++ b/vendor/github.com/containers/image/internal/testing/mocks/imagetransport.go @@ -0,0 +1,24 @@ +package mocks + +import "github.com/containers/image/types" + +// NameImageTransport is a mock of types.ImageTransport which returns itself in Name. +type NameImageTransport string + +// Name returns the name of the transport, which must be unique among other transports. +func (name NameImageTransport) Name() string { + return string(name) +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (name NameImageTransport) ParseReference(reference string) (types.ImageReference, error) { + panic("unexpected call to a mock function") +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. 
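Editorial note: the new NameImageTransport mock above is the classic "string type implementing an interface" trick — only Name() matters when a blob-info cache keys entries by transport, so every other method can safely panic in tests. A self-contained sketch of the pattern with a local, cut-down interface (not the real types.ImageTransport):

```go
package main

import "fmt"

// Transport is a cut-down stand-in for the interface a cache keys on.
type Transport interface {
	Name() string
}

// nameOnlyTransport implements Transport with nothing but its own string value.
type nameOnlyTransport string

func (n nameOnlyTransport) Name() string { return string(n) }

// cacheKey shows why Name() alone is enough: entries are scoped by transport name.
func cacheKey(t Transport, scope, digest string) string {
	return t.Name() + "|" + scope + "|" + digest
}

func main() {
	mock := nameOnlyTransport("docker")
	fmt.Println(cacheKey(mock, "registry.example.com", "sha256:aaaa"))
}
```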
+func (name NameImageTransport) ValidatePolicyConfigurationScope(scope string) error { + panic("unexpected call to a mock function") +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go index 3c6b7dffa0e..9571c37e2ba 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go @@ -77,20 +77,32 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { return d.unpackedDest.IgnoresEmbeddedDockerReference() } -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { - return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob -func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) { - return d.unpackedDest.HasBlob(ctx, info) +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { + return false } -func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) { - return d.unpackedDest.ReapplyBlob(ctx, info) +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig) +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. 
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute) } // PutManifest writes manifest to the destination diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go index d04773c1f19..ca74f950bfa 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -76,9 +76,16 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest return s.unpackedSrc.GetManifest(ctx, instanceDigest) } -// GetBlob returns a stream for the specified blob, and the blob's size. -func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { - return s.unpackedSrc.GetBlob(ctx, info) +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + return s.unpackedSrc.GetBlob(ctx, info, cache) } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport_test.go b/vendor/github.com/containers/image/oci/archive/oci_transport_test.go index ed3f85b88c9..b87b0d3d454 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_transport_test.go +++ b/vendor/github.com/containers/image/oci/archive/oci_transport_test.go @@ -189,7 +189,7 @@ func TestReferenceStringWithinTransport(t *testing.T) { for _, c := range []struct{ input, result string }{ {"/dir1:notlatest:notlatest", "/dir1:notlatest:notlatest"}, // Explicit image - {"/dir3:", "/dir3:"}, // No image + {"/dir3:", "/dir3:"}, // No image } { ref, err := ParseReference(tmpDir + c.input) require.NoError(t, err, c.input) diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index 35163275083..db102184dbc 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -107,13 +107,20 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. 
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { +func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err @@ -173,30 +180,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp return types.BlobInfo{Digest: computedDigest, Size: size}, nil } -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) { +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if info.Digest == "" { - return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) + return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) } blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) if err != nil { - return false, -1, err + return false, types.BlobInfo{}, err } finfo, err := os.Stat(blobPath) if err != nil && os.IsNotExist(err) { - return false, -1, nil + return false, types.BlobInfo{}, nil } if err != nil { - return false, -1, err + return false, types.BlobInfo{}, err } - return true, finfo.Size(), nil -} - -func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) { - return info, nil + return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil } // PutManifest writes manifest to the destination. 
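Editorial note: for an OCI layout on disk, "can I reuse this blob" reduces to stat-ing `blobs/<algorithm>/<hex>` under the layout root, exactly as the TryReusingBlob body above does. A standalone sketch of that lookup — the path shape follows the OCI image-layout spec, and the helper names are mine, not the vendored package's:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// blobPath maps "sha256:abcd…" to "<root>/blobs/sha256/abcd…".
func blobPath(root, digest string) (string, error) {
	parts := strings.SplitN(digest, ":", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("invalid digest %q", digest)
	}
	return filepath.Join(root, "blobs", parts[0], parts[1]), nil
}

// tryReusingBlob reports whether the blob already exists in the layout and, if so, its size.
func tryReusingBlob(root, digest string) (bool, int64, error) {
	p, err := blobPath(root, digest)
	if err != nil {
		return false, -1, err
	}
	fi, err := os.Stat(p)
	if os.IsNotExist(err) {
		return false, -1, nil
	}
	if err != nil {
		return false, -1, err
	}
	return true, fi.Size(), nil
}

func main() {
	found, size, err := tryReusingBlob("/tmp/oci-layout", "sha256:aaaa")
	fmt.Println(found, size, err)
}
```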
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest_test.go b/vendor/github.com/containers/image/oci/layout/oci_dest_test.go index bb79db87d87..9acad45c087 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest_test.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest_test.go @@ -3,10 +3,10 @@ package layout import ( "context" "os" - "testing" - "path/filepath" + "testing" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" "github.com/pkg/errors" "github.com/stretchr/testify/assert" @@ -31,6 +31,7 @@ func TestPutBlobDigestFailure(t *testing.T) { require.True(t, ok) blobPath, err := dirRef.blobPath(blobDigest, "") assert.NoError(t, err) + cache := blobinfocache.NewMemoryCache() firstRead := true reader := readerFromFunc(func(p []byte) (int, error) { @@ -52,7 +53,7 @@ func TestPutBlobDigestFailure(t *testing.T) { dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() - _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, false) + _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, cache, false) assert.Error(t, err) assert.Contains(t, digestErrorString, err.Error()) err = dest.Commit(context.Background()) diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go index 33115c00db5..cc536f69ef7 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src.go @@ -92,8 +92,15 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest return m, mimeType, nil } -// GetBlob returns a stream for the specified blob, and the blob's size. -func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
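Editorial note: the tests below construct a throwaway cache with blobinfocache.NewMemoryCache() and thread it through PutBlob/GetBlob. Conceptually such a cache is little more than a pair of maps — digests to their uncompressed equivalents, and digests to the places they were last seen. A toy version with local types (not the real BlobInfoCache interface) to make the data model concrete:

```go
package main

import "fmt"

// location identifies where a blob was seen, scoped by transport and registry/repo.
type location struct {
	transport, scope, ref string
}

// memoryCache is a toy stand-in for an in-memory blob info cache.
type memoryCache struct {
	uncompressed map[string]string     // any digest -> uncompressed digest
	locations    map[string][]location // digest -> places it is known to exist
}

func newMemoryCache() *memoryCache {
	return &memoryCache{
		uncompressed: map[string]string{},
		locations:    map[string][]location{},
	}
}

func (c *memoryCache) recordKnownLocation(digest string, loc location) {
	c.locations[digest] = append(c.locations[digest], loc)
}

func (c *memoryCache) candidateLocations(digest string) []location {
	return c.locations[digest]
}

func main() {
	c := newMemoryCache()
	c.recordKnownLocation("sha256:aaaa", location{"docker", "registry.example.com", "myorg/base"})
	fmt.Println(c.candidateLocations("sha256:aaaa"))
}
```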
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if len(info.URLs) != 0 { return s.getExternalBlob(ctx, info.URLs) } diff --git a/vendor/github.com/containers/image/oci/layout/oci_src_test.go b/vendor/github.com/containers/image/oci/layout/oci_src_test.go index 1e1a5b237f1..75fae446c29 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src_test.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src_test.go @@ -12,6 +12,7 @@ import ( "strings" "testing" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" digest "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" @@ -40,6 +41,7 @@ func TestGetBlobForRemoteLayers(t *testing.T) { fmt.Fprintln(w, "Hello world") })) defer ts.Close() + cache := blobinfocache.NewMemoryCache() imageSource := createImageSource(t, &types.SystemContext{}) layerInfo := types.BlobInfo{ @@ -51,7 +53,7 @@ func TestGetBlobForRemoteLayers(t *testing.T) { }, } - reader, _, err := imageSource.GetBlob(context.Background(), layerInfo) + reader, _, err := imageSource.GetBlob(context.Background(), layerInfo, cache) require.NoError(t, err) defer reader.Close() @@ -64,10 +66,11 @@ func TestGetBlobForRemoteLayersWithTLS(t *testing.T) { imageSource := createImageSource(t, &types.SystemContext{ OCICertPath: "fixtures/accepted_certs", }) + cache := blobinfocache.NewMemoryCache() layer, size, err := imageSource.GetBlob(context.Background(), types.BlobInfo{ URLs: []string{httpServerAddr}, - }) + }, cache) require.NoError(t, err) layerContent, _ := ioutil.ReadAll(layer) @@ -79,9 +82,10 @@ func TestGetBlobForRemoteLayersOnTLSFailure(t *testing.T) { imageSource := createImageSource(t, &types.SystemContext{ OCICertPath: "fixtures/rejected_certs", }) + cache := blobinfocache.NewMemoryCache() layer, size, err := imageSource.GetBlob(context.Background(), types.BlobInfo{ URLs: []string{httpServerAddr}, - }) + }, cache) require.Error(t, err) assert.Nil(t, layer) diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport_test.go b/vendor/github.com/containers/image/oci/layout/oci_transport_test.go index b9029094caa..90fcbc62eec 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_transport_test.go +++ b/vendor/github.com/containers/image/oci/layout/oci_transport_test.go @@ -169,7 +169,7 @@ func TestReferenceStringWithinTransport(t *testing.T) { for _, c := range []struct{ input, result string }{ {"/dir1:notlatest:notlatest", "/dir1:notlatest:notlatest"}, // Explicit image - {"/dir3:", "/dir3:"}, // No image + {"/dir3:", "/dir3:"}, // No image } { ref, err := ParseReference(tmpDir + c.input) require.NoError(t, err, c.input) diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index dbd04f10bc0..814c3eea1de 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -211,12 +211,19 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest * return s.docker.GetManifest(ctx, instanceDigest) } +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if err := s.ensureImageIsResolved(ctx); err != nil { return nil, 0, err } - return s.docker.GetBlob(ctx, info) + return s.docker.GetBlob(ctx, info, cache) } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. @@ -376,26 +383,31 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { return d.docker.IgnoresEmbeddedDockerReference() } +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { + return false +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. +// May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { - return d.docker.PutBlob(ctx, stream, inputInfo, isConfig) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) { - return d.docker.HasBlob(ctx, info) -} - -func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) { - return d.docker.ReapplyBlob(ctx, info) +func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig) +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. 
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute) } // PutManifest writes manifest to the destination. diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go index afff7dc1b59..d69f4fa331a 100644 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/ostree/ostree_dest.go @@ -4,7 +4,6 @@ package ostree import ( "bytes" - "compress/gzip" "context" "encoding/base64" "encoding/json" @@ -24,6 +23,7 @@ import ( "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/storage/pkg/archive" + "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/ostreedev/ostree-go/pkg/otbuiltin" @@ -132,7 +132,20 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } -func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") if err != nil { return types.BlobInfo{}, err @@ -241,7 +254,7 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin } func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { - mfz := gzip.NewWriter(output) + mfz := pgzip.NewWriter(output) defer mfz.Close() metaPacker := storage.NewJSONPacker(mfz) @@ -322,12 +335,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) } -func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) { - +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if d.repo == nil { repo, err := openRepo(d.ref.repo) if err != nil { - return false, 0, err + return false, types.BlobInfo{}, err } d.repo = repo } @@ -335,29 +354,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") if err != nil || !found { - return found, -1, err + return found, types.BlobInfo{}, err } found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size") if err != nil || !found { - return found, -1, err + return found, types.BlobInfo{}, err } found, data, err = readMetadata(d.repo, branch, "docker.size") if err != nil || !found { - return found, -1, err + return found, types.BlobInfo{}, err } size, err := strconv.ParseInt(data, 10, 64) if err != nil { - return false, -1, err + return false, types.BlobInfo{}, err } - return true, size, nil -} - -func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) { - return info, nil + return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil } // PutManifest writes manifest to the destination. diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go index 1f325b2a732..df432c9f3ad 100644 --- a/vendor/github.com/containers/image/ostree/ostree_src.go +++ b/vendor/github.com/containers/image/ostree/ostree_src.go @@ -4,7 +4,6 @@ package ostree import ( "bytes" - "compress/gzip" "context" "encoding/base64" "fmt" @@ -17,6 +16,7 @@ import ( "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/storage/pkg/ioutils" + "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" glib "github.com/ostreedev/ostree-go/pkg/glibobject" "github.com/pkg/errors" @@ -255,8 +255,15 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, return getter.Get(path) } -// GetBlob returns a stream for the specified blob, and the blob's size. -func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) { +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ostreeImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
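Editorial note: the ostree transport swaps compress/gzip for github.com/klauspost/pgzip, which parallelizes (de)compression behind the same NewWriter/NewReader surface, so the change is essentially an import swap. For reference, a standard-library version of the round-trip — switching the import to pgzip is the only change the vendored code needed:

```go
package main

import (
	"bytes"
	"compress/gzip" // the vendored code now imports "github.com/klauspost/pgzip" instead
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer

	// Compress some data (in the vendored code, tar-split metadata).
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("tar-split metadata would go here")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress it again.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```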
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { blob := info.Digest.Hex() @@ -302,7 +309,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i } mf := bytes.NewReader(tarsplit) - mfz, err := gzip.NewReader(mf) + mfz, err := pgzip.NewReader(mf) if err != nil { return nil, 0, err } diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go new file mode 100644 index 00000000000..4ee80913448 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go @@ -0,0 +1,329 @@ +package blobinfocache + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/boltdb/bolt" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +var ( + // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade + // we can simply start over with a different filename; update blobInfoCacheFilename. + + // FIXME: For CRI-O, does this need to hide information between different users? + + // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. + uncompressedDigestBucket = []byte("uncompressedDigest") + // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest + // (as a set of key=digest, value="" pairs) + digestByUncompressedBucket = []byte("digestByUncompressed") + // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing + // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value). + knownLocationsBucket = []byte("knownLocations") +) + +// Concurrency: +// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely +// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time. + +// pathLock contains a lock for a specific BoltDB database path. +type pathLock struct { + refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below! + mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database. +} + +var ( + // pathLocks contains a lock for each currently open file. + // This must be global so that independently created instances of boltDBCache exclude each other. + // The map is protected by pathLocksMutex. + // FIXME? Should this be based on device:inode numbers instead of paths instead? + pathLocks = map[string]*pathLock{} + pathLocksMutex = sync.Mutex{} +) + +// lockPath obtains the pathLock for path. +// The caller must call unlockPath eventually. +func lockPath(path string) { + pl := func() *pathLock { // A scope for defer + pathLocksMutex.Lock() + defer pathLocksMutex.Unlock() + pl, ok := pathLocks[path] + if ok { + pl.refCount++ + } else { + pl = &pathLock{refCount: 1, mutex: sync.Mutex{}} + pathLocks[path] = pl + } + return pl + }() + pl.mutex.Lock() +} + +// unlockPath releases the pathLock for path. +func unlockPath(path string) { + pathLocksMutex.Lock() + defer pathLocksMutex.Unlock() + pl, ok := pathLocks[path] + if !ok { + // Should this return an error instead? 
BlobInfoCache ultimately ignores errors… + panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path)) + } + pl.mutex.Unlock() + pl.refCount-- + if pl.refCount == 0 { + delete(pathLocks, path) + } +} + +// boltDBCache si a BlobInfoCache implementation which uses a BoltDB file at the specified path. +// +// Note that we don’t keep the database open across operations, because that would lock the file and block any other +// users; instead, we need to open/close it for every single write or lookup. +type boltDBCache struct { + path string +} + +// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path. +// Most users should call DefaultCache instead. +func NewBoltDBCache(path string) types.BlobInfoCache { + return &boltDBCache{path: path} +} + +// view returns runs the specified fn within a read-only transaction on the database. +func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) { + // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist, + // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding + // a read lock, blocking any future writes. + // Hence this preliminary check, which is RACY: Another process could remove the file + // between the Lstat call and opening the database. + if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) { + return err + } + + lockPath(bdc.path) + defer unlockPath(bdc.path) + db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) + if err != nil { + return err + } + defer func() { + if err := db.Close(); retErr == nil && err != nil { + retErr = err + } + }() + + return db.View(fn) +} + +// update returns runs the specified fn within a read-write transaction on the database. +func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) { + lockPath(bdc.path) + defer unlockPath(bdc.path) + db, err := bolt.Open(bdc.path, 0600, nil) + if err != nil { + return err + } + defer func() { + if err := db.Close(); retErr == nil && err != nil { + retErr = err + } + }() + + return db.Update(fn) +} + +// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction. +func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest { + if b := tx.Bucket(uncompressedDigestBucket); b != nil { + if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil { + d, err := digest.Parse(string(uncompressedBytes)) + if err == nil { + return d + } + // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + } + // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest. + // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings + // when we already record a (compressed, uncompressed) pair. + if b := tx.Bucket(digestByUncompressedBucket); b != nil { + if b = b.Bucket([]byte(anyDigest.String())); b != nil { + c := b.Cursor() + if k, _ := c.First(); k != nil { // The bucket is non-empty + return anyDigest + } + } + } + return "" +} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). 
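Editorial note: lockPath/unlockPath above implement a small refcounted mutex registry so that goroutines within one process never have the same BoltDB file open concurrently. The pattern is generic; a stripped-down standalone version with no BoltDB involved (names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type pathLock struct {
	refCount int64      // guarded by registryMu, not by mu
	mu       sync.Mutex // held by the goroutine currently allowed to use the path
}

var (
	registryMu sync.Mutex
	registry   = map[string]*pathLock{}
)

// lockPath blocks until the caller may exclusively use path.
func lockPath(path string) {
	registryMu.Lock()
	pl, ok := registry[path]
	if !ok {
		pl = &pathLock{}
		registry[path] = pl
	}
	pl.refCount++
	registryMu.Unlock()
	pl.mu.Lock()
}

// unlockPath releases exclusive use of path and drops the entry once nobody needs it.
func unlockPath(path string) {
	registryMu.Lock()
	defer registryMu.Unlock()
	pl := registry[path]
	pl.mu.Unlock()
	pl.refCount--
	if pl.refCount == 0 {
		delete(registry, path)
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			lockPath("/tmp/cache.boltdb")
			defer unlockPath("/tmp/cache.boltdb")
			fmt.Println("goroutine", i, "has the database")
		}(i)
	}
	wg.Wait()
}
```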
+func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + var res digest.Digest + if err := bdc.view(func(tx *bolt.Tx) error { + res = bdc.uncompressedDigest(tx, anyDigest) + return nil + }); err != nil { // Including os.IsNotExist(err) + return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return res +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) + if err != nil { + return err + } + key := []byte(anyDigest.String()) + if previousBytes := b.Get(key); previousBytes != nil { + previous, err := digest.Parse(string(previousBytes)) + if err != nil { + return err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + } + if err := b.Put(key, []byte(uncompressed.String())); err != nil { + return err + } + + b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String())) + if err != nil { + return err + } + if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again. + return err + } + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(transport.Name())) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque)) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String())) + if err != nil { + return err + } + value, err := time.Now().MarshalBinary() + if err != nil { + return err + } + if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry. + return err + } + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// appendReplacementCandiates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates. 
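Editorial note: RecordKnownLocation above nests buckets transport → scope → digest and stores each location with a binary-encoded timestamp, so later lookups can prefer recently seen copies. A compressed sketch of that bucket layout using the same bolt API the vendored code calls — the database path, bucket names, and key values here are placeholders, and error handling is minimal:

```go
package main

import (
	"fmt"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/example-blob-cache.boltdb", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Write: knownLocations/<transport>/<scope>/<digest>/<location> = timestamp.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("knownLocations"))
		if err != nil {
			return err
		}
		for _, name := range []string{"docker", "registry.example.com", "sha256:aaaa"} {
			if b, err = b.CreateBucketIfNotExists([]byte(name)); err != nil {
				return err
			}
		}
		ts, _ := time.Now().MarshalBinary()
		return b.Put([]byte("myorg/app"), ts)
	})
	if err != nil {
		panic(err)
	}

	// Read back every location recorded for that digest.
	_ = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("knownLocations"))
		if b == nil {
			return nil
		}
		for _, name := range []string{"docker", "registry.example.com", "sha256:aaaa"} {
			if b = b.Bucket([]byte(name)); b == nil {
				return nil
			}
		}
		return b.ForEach(func(k, v []byte) error {
			var t time.Time
			_ = t.UnmarshalBinary(v)
			fmt.Println("location", string(k), "last seen", t)
			return nil
		})
	})
}
```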
+func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime { + b := scopeBucket.Bucket([]byte(digest.String())) + if b == nil { + return candidates + } + _ = b.ForEach(func(k, v []byte) error { + t := time.Time{} + if err := t.UnmarshalBinary(v); err != nil { + return err + } + candidates = append(candidates, candidateWithTime{ + candidate: types.BICReplacementCandidate{ + Digest: digest, + Location: types.BICLocationReference{Opaque: string(k)}, + }, + lastSeen: t, + }) + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? + return candidates +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + res := []candidateWithTime{} + var uncompressedDigestValue digest.Digest // = "" + if err := bdc.view(func(tx *bolt.Tx) error { + scopeBucket := tx.Bucket(knownLocationsBucket) + if scopeBucket == nil { + return nil + } + scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) + if scopeBucket == nil { + return nil + } + scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) + if scopeBucket == nil { + return nil + } + + res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest) + if canSubstitute { + if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { + b := tx.Bucket(digestByUncompressedBucket) + if b != nil { + b = b.Bucket([]byte(uncompressedDigestValue.String())) + if b != nil { + if err := b.ForEach(func(k, _ []byte) error { + d, err := digest.Parse(string(k)) + if err != nil { + return err + } + if d != primaryDigest && d != uncompressedDigestValue { + res = bdc.appendReplacementCandidates(res, scopeBucket, d) + } + return nil + }); err != nil { + return err + } + } + } + if uncompressedDigestValue != primaryDigest { + res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue) + } + } + } + return nil + }); err != nil { // Including os.IsNotExist(err) + return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)? 
+ } + + return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb_test.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb_test.go new file mode 100644 index 00000000000..ff6d3cd7113 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb_test.go @@ -0,0 +1,30 @@ +package blobinfocache + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/containers/image/types" + "github.com/stretchr/testify/require" +) + +func newTestBoltDBCache(t *testing.T) (types.BlobInfoCache, func(t *testing.T)) { + // We need a separate temporary directory here, because bolt.Open(…, &bolt.Options{Readonly:true}) can't deal with + // an existing but empty file, and incorrectly fails without releasing the lock - which in turn causes + // any future writes to hang. Creating a temporary directory allows us to use a path to a + // non-existent file, thus replicating the expected conditions for creating a new DB. + dir, err := ioutil.TempDir("", "boltdb") + require.NoError(t, err) + return NewBoltDBCache(filepath.Join(dir, "db")), func(t *testing.T) { + err = os.RemoveAll(dir) + require.NoError(t, err) + } +} + +func TestNewBoltDBCache(t *testing.T) { + testGenericCache(t, newTestBoltDBCache) +} + +// FIXME: Tests for the various corner cases / failure cases of boltDBCache should be added here. diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go new file mode 100644 index 00000000000..459ae5c0664 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go @@ -0,0 +1,63 @@ +package blobinfocache + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/containers/image/types" + "github.com/sirupsen/logrus" +) + +const ( + // blobInfoCacheFilename is the file name used for blob info caches. + // If the format changes in an incompatible way, increase the version number. + blobInfoCacheFilename = "blob-info-cache-v1.boltdb" + // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes. + systemBlobInfoCacheDir = "/var/lib/containers/cache" +) + +// blobInfoCacheDir returns a path to a blob info cache appropripate for sys and euid. +// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory. +func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) { + if sys != nil && sys.BlobInfoCacheDir != "" { + return sys.BlobInfoCacheDir, nil + } + + // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged + // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe. + if euid == 0 { + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil + } + return systemBlobInfoCacheDir, nil + } + + // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts. 
+ dataDir := os.Getenv("XDG_DATA_HOME") + if dataDir == "" { + home := os.Getenv("HOME") + if home == "" { + return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty") + } + dataDir = filepath.Join(home, ".local", "share") + } + return filepath.Join(dataDir, "containers", "cache"), nil +} + +// DefaultCache returns the default BlobInfoCache implementation appropriate for sys. +func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { + dir, err := blobInfoCacheDir(sys, os.Geteuid()) + if err != nil { + logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename) + return NewMemoryCache() + } + path := filepath.Join(dir, blobInfoCacheFilename) + if err := os.MkdirAll(dir, 0700); err != nil { + logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) + return NewMemoryCache() + } + + logrus.Debugf("Using blob info cache at %s", path) + return NewBoltDBCache(path) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default_test.go b/vendor/github.com/containers/image/pkg/blobinfocache/default_test.go new file mode 100644 index 00000000000..facfdeff88e --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/default_test.go @@ -0,0 +1,162 @@ +package blobinfocache + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + + "github.com/containers/image/types" + "github.com/stretchr/testify/assert" +) + +func TestBlobInfoCacheDir(t *testing.T) { + const nondefaultDir = "/this/is/not/the/default/cache/dir" + const rootPrefix = "/root/prefix" + const homeDir = "/fake/home/directory" + const xdgDataHome = "/fake/home/directory/XDG" + + // Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not + // run in parallel unless they opt in by calling t.Parallel(). So don’t do that. + oldXRD, hasXRD := os.LookupEnv("XDG_RUNTIME_DIR") + defer func() { + if hasXRD { + os.Setenv("XDG_RUNTIME_DIR", oldXRD) + } else { + os.Unsetenv("XDG_RUNTIME_DIR") + } + }() + // FIXME: This should be a shared helper in internal/testing + oldHome, hasHome := os.LookupEnv("HOME") + defer func() { + if hasHome { + os.Setenv("HOME", oldHome) + } else { + os.Unsetenv("HOME") + } + }() + + os.Setenv("HOME", homeDir) + os.Setenv("XDG_DATA_HOME", xdgDataHome) + + // The default paths and explicit overrides + for _, c := range []struct { + sys *types.SystemContext + euid int + expected string + }{ + // The common case + {nil, 0, systemBlobInfoCacheDir}, + {nil, 1, filepath.Join(xdgDataHome, "containers", "cache")}, + // There is a context, but it does not override the path. 
+ {&types.SystemContext{}, 0, systemBlobInfoCacheDir}, + {&types.SystemContext{}, 1, filepath.Join(xdgDataHome, "containers", "cache")}, + // Path overridden + {&types.SystemContext{BlobInfoCacheDir: nondefaultDir}, 0, nondefaultDir}, + {&types.SystemContext{BlobInfoCacheDir: nondefaultDir}, 1, nondefaultDir}, + // Root overridden + {&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, 0, filepath.Join(rootPrefix, systemBlobInfoCacheDir)}, + {&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, 1, filepath.Join(xdgDataHome, "containers", "cache")}, + // Root and path overrides present simultaneously, + { + &types.SystemContext{ + RootForImplicitAbsolutePaths: rootPrefix, + BlobInfoCacheDir: nondefaultDir, + }, + 0, nondefaultDir, + }, + { + &types.SystemContext{ + RootForImplicitAbsolutePaths: rootPrefix, + BlobInfoCacheDir: nondefaultDir, + }, + 1, nondefaultDir, + }, + } { + path, err := blobInfoCacheDir(c.sys, c.euid) + require.NoError(t, err) + assert.Equal(t, c.expected, path) + } + + // Paths used by unprivileged users + for _, c := range []struct { + xdgDH, home, expected string + }{ + {"", homeDir, filepath.Join(homeDir, ".local", "share", "containers", "cache")}, // HOME only + {xdgDataHome, "", filepath.Join(xdgDataHome, "containers", "cache")}, // XDG_DATA_HOME only + {xdgDataHome, homeDir, filepath.Join(xdgDataHome, "containers", "cache")}, // both + {"", "", ""}, // neither + } { + if c.xdgDH != "" { + os.Setenv("XDG_DATA_HOME", c.xdgDH) + } else { + os.Unsetenv("XDG_DATA_HOME") + } + if c.home != "" { + os.Setenv("HOME", c.home) + } else { + os.Unsetenv("HOME") + } + for _, sys := range []*types.SystemContext{nil, {}} { + path, err := blobInfoCacheDir(sys, 1) + if c.expected != "" { + require.NoError(t, err) + assert.Equal(t, c.expected, path) + } else { + assert.Error(t, err) + } + } + } +} + +func TestDefaultCache(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestDefaultCache") + require.NoError(t, err) + //defer os.RemoveAll(tmpDir) + + // Success + normalDir := filepath.Join(tmpDir, "normal") + c := DefaultCache(&types.SystemContext{BlobInfoCacheDir: normalDir}) + // This is ugly hard-coding internals of boltDBCache: + assert.Equal(t, &boltDBCache{path: filepath.Join(normalDir, blobInfoCacheFilename)}, c) + + // Error running blobInfoCacheDir: + // Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not + // run in parallel unless they opt in by calling t.Parallel(). So don’t do that. 
+ oldXRD, hasXRD := os.LookupEnv("XDG_RUNTIME_DIR") + defer func() { + if hasXRD { + os.Setenv("XDG_RUNTIME_DIR", oldXRD) + } else { + os.Unsetenv("XDG_RUNTIME_DIR") + } + }() + // FIXME: This should be a shared helper in internal/testing + oldHome, hasHome := os.LookupEnv("HOME") + defer func() { + if hasHome { + os.Setenv("HOME", oldHome) + } else { + os.Unsetenv("HOME") + } + }() + os.Unsetenv("HOME") + os.Unsetenv("XDG_DATA_HOME") + c = DefaultCache(nil) + assert.IsType(t, NewMemoryCache(), c) + + // Error creating the parent directory: + unwritableDir := filepath.Join(tmpDir, "unwritable") + err = os.Mkdir(unwritableDir, 700) + require.NoError(t, err) + defer os.Chmod(unwritableDir, 0700) // To make it possible to remove it again + err = os.Chmod(unwritableDir, 0500) + require.NoError(t, err) + st, _ := os.Stat(unwritableDir) + logrus.Errorf("%s: %#v", unwritableDir, st) + c = DefaultCache(&types.SystemContext{BlobInfoCacheDir: filepath.Join(unwritableDir, "subdirectory")}) + assert.IsType(t, NewMemoryCache(), c) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/generic_test.go b/vendor/github.com/containers/image/pkg/blobinfocache/generic_test.go new file mode 100644 index 00000000000..0543a0e639e --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/generic_test.go @@ -0,0 +1,170 @@ +package blobinfocache + +import ( + "testing" + + "github.com/containers/image/internal/testing/mocks" + + "github.com/containers/image/types" + digest "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" +) + +const ( + digestUnknown = digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111") + digestUncompressed = digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222") + digestCompressedA = digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333") + digestCompressedB = digest.Digest("sha256:4444444444444444444444444444444444444444444444444444444444444444") + digestCompressedUnrelated = digest.Digest("sha256:5555555555555555555555555555555555555555555555555555555555555555") + digestCompressedPrimary = digest.Digest("sha256:6666666666666666666666666666666666666666666666666666666666666666") +) + +// testGenericCache runs an implementation-independent set of tests, given a +// newTestCache, which can be called repeatedly and always returns a (cache, cleanup callback) pair +func testGenericCache(t *testing.T, newTestCache func(t *testing.T) (types.BlobInfoCache, func(t *testing.T))) { + for _, s := range []struct { + name string + fn func(t *testing.T, cache types.BlobInfoCache) + }{ + {"UncompressedDigest", testGenericUncompressedDigest}, + {"RecordDigestUncompressedPair", testGenericRecordDigestUncompressedPair}, + {"RecordKnownLocations", testGenericRecordKnownLocations}, + {"CandidateLocations", testGenericCandidateLocations}, + } { + t.Run(s.name, func(t *testing.T) { + cache, cleanup := newTestCache(t) + defer cleanup(t) + s.fn(t, cache) + }) + } +} + +func testGenericUncompressedDigest(t *testing.T, cache types.BlobInfoCache) { + // Nothing is known. 
+ assert.Equal(t, digest.Digest(""), cache.UncompressedDigest(digestUnknown)) + + cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed) + cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed) + // Known compressed→uncompressed mapping + assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedA)) + assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedB)) + // This implicitly marks digestUncompressed as uncompressed. + assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestUncompressed)) + + // Known uncompressed→self mapping + cache.RecordDigestUncompressedPair(digestCompressedUnrelated, digestCompressedUnrelated) + assert.Equal(t, digestCompressedUnrelated, cache.UncompressedDigest(digestCompressedUnrelated)) +} + +func testGenericRecordDigestUncompressedPair(t *testing.T, cache types.BlobInfoCache) { + for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things. + // Known compressed→uncompressed mapping + cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed) + assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedA)) + // Two mappings to the same uncompressed digest + cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed) + assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedB)) + + // Mapping an uncompresesd digest to self + cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed) + assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestUncompressed)) + } +} + +func testGenericRecordKnownLocations(t *testing.T, cache types.BlobInfoCache) { + transport := mocks.NameImageTransport("==BlobInfocache transport mock") + for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things. + for _, scopeName := range []string{"A", "B"} { // Run the test in two different scopes to verify they don't affect each other. + scope := types.BICTransportScope{Opaque: scopeName} + for _, digest := range []digest.Digest{digestCompressedA, digestCompressedB} { // Two different digests should not affect each other either. 
+ lr1 := types.BICLocationReference{Opaque: scopeName + "1"} + lr2 := types.BICLocationReference{Opaque: scopeName + "2"} + cache.RecordKnownLocation(transport, scope, digest, lr2) + cache.RecordKnownLocation(transport, scope, digest, lr1) + assert.Equal(t, []types.BICReplacementCandidate{ + {Digest: digest, Location: lr1}, + {Digest: digest, Location: lr2}, + }, cache.CandidateLocations(transport, scope, digest, false)) + } + } + } +} + +// candidate is a shorthand for types.BICReplacementCandiddate +type candidate struct { + d digest.Digest + lr string +} + +func assertCandidatesMatch(t *testing.T, scopeName string, expected []candidate, actual []types.BICReplacementCandidate) { + e := make([]types.BICReplacementCandidate, len(expected)) + for i, ev := range expected { + e[i] = types.BICReplacementCandidate{Digest: ev.d, Location: types.BICLocationReference{Opaque: scopeName + ev.lr}} + } + assert.Equal(t, e, actual) +} + +func testGenericCandidateLocations(t *testing.T, cache types.BlobInfoCache) { + transport := mocks.NameImageTransport("==BlobInfocache transport mock") + cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed) + cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed) + cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed) + digestNameSet := []struct { + n string + d digest.Digest + }{ + {"U", digestUncompressed}, + {"A", digestCompressedA}, + {"B", digestCompressedB}, + {"CU", digestCompressedUnrelated}, + } + + for _, scopeName := range []string{"A", "B"} { // Run the test in two different scopes to verify they don't affect each other. + scope := types.BICTransportScope{Opaque: scopeName} + // Nothing is known. + assert.Equal(t, []types.BICReplacementCandidate{}, cache.CandidateLocations(transport, scope, digestUnknown, false)) + assert.Equal(t, []types.BICReplacementCandidate{}, cache.CandidateLocations(transport, scope, digestUnknown, true)) + + // Record "2" entries before "1" entries; then results should sort "1" (more recent) before "2" (older) + for _, suffix := range []string{"2", "1"} { + for _, e := range digestNameSet { + cache.RecordKnownLocation(transport, scope, e.d, types.BICLocationReference{Opaque: scopeName + e.n + suffix}) + } + } + + // No substitutions allowed: + for _, e := range digestNameSet { + assertCandidatesMatch(t, scopeName, []candidate{ + {d: e.d, lr: e.n + "1"}, {d: e.d, lr: e.n + "2"}, + }, cache.CandidateLocations(transport, scope, e.d, false)) + } + + // With substitutions: The original digest is always preferred, then other compressed, then the uncompressed one. 
+ assertCandidatesMatch(t, scopeName, []candidate{ + {d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"}, + {d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"}, + {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"}, + }, cache.CandidateLocations(transport, scope, digestCompressedA, true)) + + assertCandidatesMatch(t, scopeName, []candidate{ + {d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"}, + {d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"}, + {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"}, + }, cache.CandidateLocations(transport, scope, digestCompressedB, true)) + + assertCandidatesMatch(t, scopeName, []candidate{ + {d: digestUncompressed, lr: "U1"}, {d: digestUncompressed, lr: "U2"}, + // "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order + {d: digestCompressedB, lr: "B1"}, + {d: digestCompressedA, lr: "A1"}, + {d: digestCompressedB, lr: "B2"}, + // Beyond the replacementAttempts limit: {d: digestCompressedA, lr: "A2"}, + }, cache.CandidateLocations(transport, scope, digestUncompressed, true)) + + // Locations are known, but no relationships + assertCandidatesMatch(t, scopeName, []candidate{ + {d: digestCompressedUnrelated, lr: "CU1"}, {d: digestCompressedUnrelated, lr: "CU2"}, + }, cache.CandidateLocations(transport, scope, digestCompressedUnrelated, true)) + + } +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go new file mode 100644 index 00000000000..1ce7dee13b5 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go @@ -0,0 +1,123 @@ +package blobinfocache + +import ( + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// locationKey only exists to make lookup in knownLocations easier. +type locationKey struct { + transport string + scope types.BICTransportScope + blobDigest digest.Digest +} + +// memoryCache implements an in-memory-only BlobInfoCache +type memoryCache struct { + uncompressedDigests map[digest.Digest]digest.Digest + digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest + knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference +} + +// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only. +// This is primarily intended for tests, but also used as a fallback if DefaultCache +// can’t determine, or set up, the location for a persistent cache. +// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache. +func NewMemoryCache() types.BlobInfoCache { + return &memoryCache{ + uncompressedDigests: map[digest.Digest]digest.Digest{}, + digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, + knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, + } +} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). 
+func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + if d, ok := mem.uncompressedDigests[anyDigest]; ok { + return d + } + // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. + // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings + // when we already record a (compressed, uncompressed) pair. + if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { + return anyDigest + } + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + mem.uncompressedDigests[anyDigest] = uncompressed + + anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] + if !ok { + anyDigestSet = map[digest.Digest]struct{}{} + mem.digestsByUncompressed[uncompressed] = anyDigestSet + } + anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} + locationScope, ok := mem.knownLocations[key] + if !ok { + locationScope = map[types.BICLocationReference]time.Time{} + mem.knownLocations[key] = locationScope + } + locationScope[location] = time.Now() // Possibly overwriting an older entry. +} + +// appendReplacementCandiates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. +func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime { + locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present + for l, t := range locations { + candidates = append(candidates, candidateWithTime{ + candidate: types.BICReplacementCandidate{ + Digest: digest, + Location: l, + }, + lastSeen: t, + }) + } + return candidates +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). 
+// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + res := []candidateWithTime{} + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) + var uncompressedDigest digest.Digest // = "" + if canSubstitute { + if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" { + otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map + for d := range otherDigests { + if d != primaryDigest && d != uncompressedDigest { + res = mem.appendReplacementCandidates(res, transport, scope, d) + } + } + if uncompressedDigest != primaryDigest { + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) + } + } + } + return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory_test.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory_test.go new file mode 100644 index 00000000000..c6fcbf7a354 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory_test.go @@ -0,0 +1,15 @@ +package blobinfocache + +import ( + "testing" + + "github.com/containers/image/types" +) + +func newTestMemoryCache(t *testing.T) (types.BlobInfoCache, func(t *testing.T)) { + return NewMemoryCache(), func(t *testing.T) {} +} + +func TestNewMemoryCache(t *testing.T) { + testGenericCache(t, newTestMemoryCache) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none.go new file mode 100644 index 00000000000..5658d89ffb6 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/none.go @@ -0,0 +1,47 @@ +package blobinfocache + +import ( + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" +) + +// noCache implements a dummy BlobInfoCache which records no data. +type noCache struct { +} + +// NoCache implements BlobInfoCache by not recording any data. +// +// This exists primarily for implementations of configGetter for Manifest.Inspect, +// because configs only have one representation. +// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache. +var NoCache types.BlobInfoCache = noCache{} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) 
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return nil +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go new file mode 100644 index 00000000000..02709aa1c05 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go @@ -0,0 +1,108 @@ +package blobinfocache + +import ( + "sort" + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" +) + +// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by types.BlobInfoCache.CandidateLocations. +// This is a heuristic/guess, and could well use a different value. +const replacementAttempts = 5 + +// candidateWithTime is the input to types.BICReplacementCandidate prioritization. +type candidateWithTime struct { + candidate types.BICReplacementCandidate // The replacement candidate + lastSeen time.Time // Time the candidate was last known to exist (either read or written) +} + +// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, +// along with the specially-treated digest values for the implementation of sort.Interface.Less +type candidateSortState struct { + cs []candidateWithTime // The entries to sort + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) Len() int { + return len(css.cs) +} + +func (css *candidateSortState) Less(i, j int) bool { + xi := css.cs[i] + xj := css.cs[j] + + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. 
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.candidate.Digest != xj.candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.candidate.Digest == css.primaryDigest { + return true + } + if xj.candidate.Digest == css.primaryDigest { + return false + } + if css.uncompressedDigest != "" { + if xi.candidate.Digest == css.uncompressedDigest { + return false + } + if xj.candidate.Digest == css.uncompressedDigest { + return true + } + } + } else { // xi.candidate.Digest == xj.candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { + return xi.lastSeen.After(xj.lastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time + return xi.lastSeen.After(xj.lastSeen) + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return xi.candidate.Digest < xj.candidate.Digest +} + +func (css *candidateSortState) Swap(i, j int) { + css.cs[i], css.cs[j] = css.cs[j], css.cs[i] +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the +// number of entries to limit, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. + sort.Sort(&candidateSortState{ + cs: cs, + primaryDigest: primaryDigest, + uncompressedDigest: uncompressedDigest, + }) + + resLength := len(cs) + if resLength > maxCandidates { + resLength = maxCandidates + } + res := make([]types.BICReplacementCandidate, resLength) + for i := range res { + res[i] = cs[i].candidate + } + return res +} + +// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, +// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), +// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. +// +// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course +// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) 
+func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { + return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize_test.go b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize_test.go new file mode 100644 index 00000000000..1ca0fe392c5 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize_test.go @@ -0,0 +1,163 @@ +package blobinfocache + +import ( + "fmt" + "testing" + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" +) + +var ( + // cssLiteral contains a non-trivial candidateSortState shared among several tests below. + cssLiteral = candidateSortState{ + cs: []candidateWithTime{ + {types.BICReplacementCandidate{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}}, time.Unix(1, 0)}, + {types.BICReplacementCandidate{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}}, time.Unix(1, 1)}, + {types.BICReplacementCandidate{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}}, time.Unix(1, 1)}, + {types.BICReplacementCandidate{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}}, time.Unix(1, 0)}, + {types.BICReplacementCandidate{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}}, time.Unix(1, 1)}, + {types.BICReplacementCandidate{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}}, time.Unix(1, 1)}, + {types.BICReplacementCandidate{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}}, time.Unix(2, 0)}, + {types.BICReplacementCandidate{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}}, time.Unix(1, 0)}, + }, + primaryDigest: digestCompressedPrimary, + uncompressedDigest: digestUncompressed, + } + // cssExpectedReplacementCandidates is the fully-sorted, unlimited, result of prioritizing cssLiteral. 
+ cssExpectedReplacementCandidates = []types.BICReplacementCandidate{ + {Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}}, + {Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}}, + {Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}}, + {Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}}, + {Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}}, + {Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}}, + {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}}, + {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}}, + } +) + +func TestCandidateSortStateLen(t *testing.T) { + css := cssLiteral + assert.Equal(t, 8, css.Len()) + + css.cs = []candidateWithTime{} + assert.Equal(t, 0, css.Len()) +} + +func TestCandidateSortStateLess(t *testing.T) { + type p struct { + d digest.Digest + t int64 + } + + // Primary criteria: Also ensure that time does not matter + for _, c := range []struct { + name string + res int + d0, d1 digest.Digest + }{ + {"primary < any", -1, digestCompressedPrimary, digestCompressedA}, + {"any < uncompressed", -1, digestCompressedA, digestUncompressed}, + {"primary < uncompressed", -1, digestCompressedPrimary, digestUncompressed}, + } { + for _, tms := range [][2]int64{{1, 2}, {2, 1}, {1, 1}} { + caseName := fmt.Sprintf("%s %v", c.name, tms) + css := candidateSortState{ + cs: []candidateWithTime{ + {types.BICReplacementCandidate{Digest: c.d0, Location: types.BICLocationReference{Opaque: "L0"}}, time.Unix(tms[0], 0)}, + {types.BICReplacementCandidate{Digest: c.d1, Location: types.BICLocationReference{Opaque: "L1"}}, time.Unix(tms[1], 0)}, + }, + primaryDigest: digestCompressedPrimary, + uncompressedDigest: digestUncompressed, + } + assert.Equal(t, c.res < 0, css.Less(0, 1), caseName) + assert.Equal(t, c.res > 0, css.Less(1, 0), caseName) + + if c.d0 != digestUncompressed && c.d1 != digestUncompressed { + css.uncompressedDigest = "" + assert.Equal(t, c.res < 0, css.Less(0, 1), caseName) + assert.Equal(t, c.res > 0, css.Less(1, 0), caseName) + + css.uncompressedDigest = css.primaryDigest + assert.Equal(t, c.res < 0, css.Less(0, 1), caseName) + assert.Equal(t, c.res > 0, css.Less(1, 0), caseName) + } + } + } + + // Ordering within the three primary groups + for _, c := range []struct { + name string + res int + p0, p1 p + }{ + {"primary: t=2 < t=1", -1, p{digestCompressedPrimary, 2}, p{digestCompressedPrimary, 1}}, + {"primary: t=1 == t=1", 0, p{digestCompressedPrimary, 1}, p{digestCompressedPrimary, 1}}, + {"uncompressed: t=2 < t=1", -1, p{digestUncompressed, 2}, p{digestUncompressed, 1}}, + {"uncompressed: t=1 == t=1", 0, p{digestUncompressed, 1}, p{digestUncompressed, 1}}, + {"any: t=2 < t=1, [d=A vs. d=B lower-priority]", -1, p{digestCompressedA, 2}, p{digestCompressedB, 1}}, + {"any: t=2 < t=1, [d=B vs. d=A lower-priority]", -1, p{digestCompressedB, 2}, p{digestCompressedA, 1}}, + {"any: t=2 < t=1, [d=A vs. 
d=A lower-priority]", -1, p{digestCompressedA, 2}, p{digestCompressedA, 1}}, + {"any: t=1 == t=1, d=A < d=B", -1, p{digestCompressedA, 1}, p{digestCompressedB, 1}}, + {"any: t=1 == t=1, d=A == d=A", 0, p{digestCompressedA, 1}, p{digestCompressedA, 1}}, + } { + css := candidateSortState{ + cs: []candidateWithTime{ + {types.BICReplacementCandidate{Digest: c.p0.d, Location: types.BICLocationReference{Opaque: "L0"}}, time.Unix(c.p0.t, 0)}, + {types.BICReplacementCandidate{Digest: c.p1.d, Location: types.BICLocationReference{Opaque: "L1"}}, time.Unix(c.p1.t, 0)}, + }, + primaryDigest: digestCompressedPrimary, + uncompressedDigest: digestUncompressed, + } + assert.Equal(t, c.res < 0, css.Less(0, 1), c.name) + assert.Equal(t, c.res > 0, css.Less(1, 0), c.name) + + if c.p0.d != digestUncompressed && c.p1.d != digestUncompressed { + css.uncompressedDigest = "" + assert.Equal(t, c.res < 0, css.Less(0, 1), c.name) + assert.Equal(t, c.res > 0, css.Less(1, 0), c.name) + + css.uncompressedDigest = css.primaryDigest + assert.Equal(t, c.res < 0, css.Less(0, 1), c.name) + assert.Equal(t, c.res > 0, css.Less(1, 0), c.name) + } + } +} + +func TestCandidateSortStateSwap(t *testing.T) { + freshCSS := func() candidateSortState { // Return a deep copy of cssLiteral which is safe to modify. + res := cssLiteral + res.cs = append([]candidateWithTime{}, cssLiteral.cs...) + return res + } + + css := freshCSS() + css.Swap(0, 1) + assert.Equal(t, cssLiteral.cs[1], css.cs[0]) + assert.Equal(t, cssLiteral.cs[0], css.cs[1]) + assert.Equal(t, cssLiteral.cs[2], css.cs[2]) + + css = freshCSS() + css.Swap(1, 1) + assert.Equal(t, cssLiteral, css) +} + +func TestDestructivelyPrioritizeReplacementCandidatesWithMax(t *testing.T) { + for _, max := range []int{0, 1, replacementAttempts, 100} { + // Just a smoke test; we mostly rely on test coverage in TestCandidateSortStateLess + res := destructivelyPrioritizeReplacementCandidatesWithMax(append([]candidateWithTime{}, cssLiteral.cs...), digestCompressedPrimary, digestUncompressed, max) + if max > len(cssExpectedReplacementCandidates) { + max = len(cssExpectedReplacementCandidates) + } + assert.Equal(t, cssExpectedReplacementCandidates[:max], res) + } +} + +func TestDestructivelyPrioritizeReplacementCandidates(t *testing.T) { + // Just a smoke test; we mostly rely on test coverage in TestCandidateSortStateLess + res := destructivelyPrioritizeReplacementCandidates(append([]candidateWithTime{}, cssLiteral.cs...), digestCompressedPrimary, digestUncompressed) + assert.Equal(t, cssExpectedReplacementCandidates[:replacementAttempts], res) +} diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go index a39523e4f4b..aad2bfcf269 100644 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/pkg/compression/compression.go @@ -3,10 +3,10 @@ package compression import ( "bytes" "compress/bzip2" - "compress/gzip" "io" "io/ioutil" + "github.com/klauspost/pgzip" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" @@ -18,7 +18,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error) // GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { - return gzip.NewReader(r) + return pgzip.NewReader(r) } // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. 
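The gzip-to-pgzip switch above is transparent to callers: klauspost/pgzip mirrors the compress/gzip reader API, so GzipDecompressor keeps its DecompressorFunc signature and only the underlying reader changes. A minimal sketch, not part of this patch, of exercising the exported decompressor (the payload and variable names are illustrative only):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/containers/image/pkg/compression"
)

func main() {
	// Compress a small payload with the standard library writer.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, blob")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress it through GzipDecompressor, which after this change delegates to pgzip.NewReader.
	rc, err := compression.GzipDecompressor(&buf)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	plain, err := ioutil.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // "hello, blob"
}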
diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice_test.go b/vendor/github.com/containers/image/pkg/strslice/strslice_test.go index 1163b3652c9..0ef5f4bce20 100644 --- a/vendor/github.com/containers/image/pkg/strslice/strslice_test.go +++ b/vendor/github.com/containers/image/pkg/strslice/strslice_test.go @@ -29,8 +29,8 @@ func TestStrSliceMarshalJSON(t *testing.T) { func TestStrSliceUnmarshalJSON(t *testing.T) { parts := map[string][]string{ - "": {"default", "values"}, - "[]": {}, + "": {"default", "values"}, + "[]": {}, `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, } for json, expectedParts := range parts { diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go index 3235f1ef5ea..9e3e9cfe19a 100644 --- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go @@ -3,10 +3,8 @@ package sysregistriesv2 import ( "fmt" "io/ioutil" - "net" - "net/url" + "os" "path/filepath" - "strconv" "strings" "sync" @@ -84,8 +82,8 @@ func (e *InvalidRegistries) Error() string { } // parseURL parses the input string, performs some sanity checks and returns -// the sanitized input string. An error is returned in case parsing fails or -// or if URI scheme or user is set. +// the sanitized input string. An error is returned if the input string is +// empty or if contains an "http{s,}://" prefix. func parseURL(input string) (string, error) { trimmed := strings.TrimRight(input, "/") @@ -93,56 +91,8 @@ func parseURL(input string) (string, error) { return "", &InvalidRegistries{s: "invalid URL: cannot be empty"} } - // Ultimately, we expect input of the form example.com[/namespace/…], a prefix - // of a fully-expended reference (containers/image/docker/Reference.String()). - // c/image/docker/Reference does not currently provide such a parser. - // So, we use url.Parse("http://"+trimmed) below to ~verify the format, possibly - // letting some invalid input in, trading that off for a simpler parser. - // - // url.Parse("http://"+trimmed) is, sadly, too permissive, notably for - // trimmed == "http://example.com/…", url.Parse("http://http://example.com/…") - // is accepted and parsed as - // {Scheme: "http", Host: "http:", Path: "//example.com/…"}. - // - // So, first we do an explicit check for an unwanted scheme prefix: - - // This will parse trimmed=="http://example.com/…" with Scheme: "http". Perhaps surprisingly, - // it also succeeds for the input we want to accept, in different ways: - // "example.com" -> {Scheme:"", Opaque:"", Path:"example.com"} - // "example.com/repo" -> {Scheme:"", Opaque:"", Path:"example.com/repo"} - // "example.com:5000" -> {Scheme:"example.com", Opaque:"5000"} - // "example.com:5000/repo" -> {Scheme:"example.com", Opaque:"5000/repo"} - trimmedIsIP := false - if sep := strings.LastIndex(trimmed, ":"); sep != -1 && net.ParseIP(trimmed[:sep]) != nil { - if _, err := strconv.ParseUint(trimmed[sep+1:], 10, 16); err != nil { - msg := fmt.Sprintf("invalid URL '%s': invalid port number '%s' in numeric IPv4 address", input, trimmed[sep+1:]) - return "", &InvalidRegistries{s: msg} - } - trimmedIsIP = true - } - if !trimmedIsIP { - uri, err := url.Parse(trimmed) - if err != nil { - return "", &InvalidRegistries{s: fmt.Sprintf("invalid URL '%s': %v", input, err)} - } - - // Check if a URI Scheme is set. 
- // Note that URLs that do not start with a slash after the scheme are - // interpreted as `scheme:opaque[?query][#fragment]`; see above for examples. - if uri.Scheme != "" && uri.Opaque == "" { - msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input) - return "", &InvalidRegistries{s: msg} - } - } - - uri, err := url.Parse("http://" + trimmed) - if err != nil { - msg := fmt.Sprintf("invalid URL '%s': sanitized URL did not parse: %v", input, err) - return "", &InvalidRegistries{s: msg} - } - - if uri.User != nil { - msg := fmt.Sprintf("invalid URL '%s': user/password are not supported", trimmed) + if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { + msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input) return "", &InvalidRegistries{s: msg} } @@ -291,7 +241,18 @@ var configMutex = sync.Mutex{} // are synchronized via configMutex. var configCache = make(map[string][]Registry) +// InvalidateCache invalidates the registry cache. This function is meant to be +// used for long-running processes that need to reload potential changes made to +// the cached registry config files. +func InvalidateCache() { + configMutex.Lock() + defer configMutex.Unlock() + configCache = make(map[string][]Registry) +} + // GetRegistries loads and returns the registries specified in the config. +// Note the parsed content of registry config files is cached. For reloading, +// use `InvalidateCache` and re-call `GetRegistries`. func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { configPath := getConfigPath(ctx) @@ -305,6 +266,13 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { // load the config config, err := loadRegistryConf(configPath) if err != nil { + // Return an empty []Registry if we use the default config, + // which implies that the config path of the SystemContext + // isn't set. Note: if ctx.SystemRegistriesConfPath points to + // the default config, we will still return an error. + if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { + return []Registry{}, nil + } return nil, err } @@ -335,23 +303,62 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { // FindUnqualifiedSearchRegistries returns all registries that are configured // for unqualified image search (i.e., with Registry.Search == true). -func FindUnqualifiedSearchRegistries(registries []Registry) []Registry { +func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, error) { + registries, err := GetRegistries(ctx) + if err != nil { + return nil, err + } + unqualified := []Registry{} for _, reg := range registries { if reg.Search { unqualified = append(unqualified, reg) } } - return unqualified + return unqualified, nil } -// FindRegistry returns the Registry with the longest prefix for ref. If no -// Registry prefixes the image, nil is returned. -func FindRegistry(ref string, registries []Registry) *Registry { +// refMatchesPrefix returns true iff ref, +// which is a registry, repository namespace, repository or image reference (as formatted by +// reference.Domain(), reference.Named.Name() or reference.Reference.String() +// — note that this requires the name to start with an explicit hostname!), +// matches a Registry.Prefix value. +// (This is split from the caller primarily to make testing easier.) 
+func refMatchesPrefix(ref, prefix string) bool { + switch { + case len(ref) < len(prefix): + return false + case len(ref) == len(prefix): + return ref == prefix + case len(ref) > len(prefix): + if !strings.HasPrefix(ref, prefix) { + return false + } + c := ref[len(prefix)] + // This allows "example.com:5000" to match "example.com", + // which is unintended; that will get fixed eventually, DON'T RELY + // ON THE CURRENT BEHAVIOR. + return c == ':' || c == '/' || c == '@' + default: + panic("Internal error: impossible comparison outcome") + } +} + +// FindRegistry returns the Registry with the longest prefix for ref, +// which is a registry, repository namespace repository or image reference (as formatted by +// reference.Domain(), reference.Named.Name() or reference.Reference.String() +// — note that this requires the name to start with an explicit hostname!). +// If no Registry prefixes the image, nil is returned. +func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { + registries, err := GetRegistries(ctx) + if err != nil { + return nil, err + } + reg := Registry{} prefixLen := 0 for _, r := range registries { - if strings.HasPrefix(ref, r.Prefix) { + if refMatchesPrefix(ref, r.Prefix) { length := len(r.Prefix) if length > prefixLen { reg = r @@ -360,9 +367,9 @@ func FindRegistry(ref string, registries []Registry) *Registry { } } if prefixLen != 0 { - return ® + return ®, nil } - return nil + return nil, nil } // Reads the global registry file from the filesystem. Returns a byte array. diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2_test.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2_test.go index f7e4e95de24..252f23980a6 100644 --- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2_test.go +++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2_test.go @@ -1,6 +1,7 @@ package sysregistriesv2 import ( + "fmt" "testing" "github.com/containers/image/types" @@ -25,12 +26,7 @@ func TestParseURL(t *testing.T) { assert.Contains(t, err.Error(), "invalid URL 'https://example.com': URI schemes are not supported") _, err = parseURL("john.doe@example.com") - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "invalid URL 'john.doe@example.com': user/password are not supported") - - _, err = parseURL("127.0.0.1:123456") - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "invalid port number '123456' in numeric IPv4 address") + assert.Nil(t, err) // valid URLs url, err = parseURL("example.com") @@ -48,18 +44,6 @@ func TestParseURL(t *testing.T) { url, err = parseURL("example.com:5000/with/path") assert.Nil(t, err) assert.Equal(t, "example.com:5000/with/path", url) - - url, err = parseURL("example.com:5000") - assert.Nil(t, err) - assert.Equal(t, "example.com:5000", url) - - url, err = parseURL("172.30.0.1") - assert.Nil(t, err) - assert.Equal(t, "172.30.0.1", url) - - url, err = parseURL("172.30.0.1:5000") // often used in OpenShift - assert.Nil(t, err) - assert.Equal(t, "172.30.0.1:5000", url) } func TestEmptyConfig(t *testing.T) { @@ -92,8 +76,8 @@ blocked = true`) assert.Nil(t, err) assert.Equal(t, 2, len(registries)) - var reg *Registry - reg = FindRegistry("registry.com/image:tag", registries) + reg, err := FindRegistry(nil, "registry.com/image:tag") + assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, 2, len(reg.Mirrors)) assert.Equal(t, "mirror-1.registry.com", reg.Mirrors[0].URL) @@ -137,6 +121,46 @@ url = "mirror-b.com" 
assert.Contains(t, err.Error(), "invalid URL") } +func TestRefMatchesPrefix(t *testing.T) { + for _, c := range []struct { + ref, prefix string + expected bool + }{ + // Prefix is a reference.Domain() value + {"docker.io", "docker.io", true}, + {"docker.io", "example.com", false}, + {"example.com:5000", "example.com:5000", true}, + {"example.com:50000", "example.com:5000", false}, + {"example.com:5000", "example.com", true}, // FIXME FIXME This is unintended and undocumented, don't rely on this behavior + {"example.com/foo", "example.com", true}, + {"example.com/foo/bar", "example.com", true}, + {"example.com/foo/bar:baz", "example.com", true}, + {"example.com/foo/bar@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com", true}, + // Prefix is a reference.Named.Name() value or a repo namespace + {"docker.io", "docker.io/library", false}, + {"docker.io/library", "docker.io/library", true}, + {"example.com/library", "docker.io/library", false}, + {"docker.io/libraryy", "docker.io/library", false}, + {"docker.io/library/busybox", "docker.io/library", true}, + {"docker.io", "docker.io/library/busybox", false}, + {"docker.io/library/busybox", "docker.io/library/busybox", true}, + {"example.com/library/busybox", "docker.io/library/busybox", false}, + {"docker.io/library/busybox2", "docker.io/library/busybox", false}, + // Prefix is a single image + {"example.com", "example.com/foo:bar", false}, + {"example.com/foo", "example.com/foo:bar", false}, + {"example.com/foo:bar", "example.com/foo:bar", true}, + {"example.com/foo:bar2", "example.com/foo:bar", false}, + {"example.com", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", false}, + {"example.com/foo", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", false}, + {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true}, + {"example.com/foo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", false}, + } { + res := refMatchesPrefix(c.ref, c.prefix) + assert.Equal(t, c.expected, res, fmt.Sprintf("%s vs. 
%s", c.ref, c.prefix)) + } +} + func TestFindRegistry(t *testing.T) { testConfig = []byte(` [[registry]] @@ -163,23 +187,41 @@ prefix = ""`) assert.Nil(t, err) assert.Equal(t, 5, len(registries)) - var reg *Registry - reg = FindRegistry("simple-prefix.com/foo/bar:latest", registries) + reg, err := FindRegistry(nil, "simple-prefix.com/foo/bar:latest") + assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "simple-prefix.com", reg.Prefix) assert.Equal(t, reg.URL, "registry.com:5000") - reg = FindRegistry("complex-prefix.com:4000/with/path/and/beyond:tag", registries) + // path match + reg, err = FindRegistry(nil, "simple-prefix.com/") + assert.Nil(t, err) + assert.NotNil(t, reg) + + // hostname match + reg, err = FindRegistry(nil, "simple-prefix.com") + assert.Nil(t, err) + assert.NotNil(t, reg) + + // invalid match + reg, err = FindRegistry(nil, "simple-prefix.comx") + assert.Nil(t, err) + assert.Nil(t, reg) + + reg, err = FindRegistry(nil, "complex-prefix.com:4000/with/path/and/beyond:tag") + assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "complex-prefix.com:4000/with/path", reg.Prefix) assert.Equal(t, "another-registry.com:5000", reg.URL) - reg = FindRegistry("no-prefix.com/foo:tag", registries) + reg, err = FindRegistry(nil, "no-prefix.com/foo:tag") + assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "no-prefix.com", reg.Prefix) assert.Equal(t, "no-prefix.com", reg.URL) - reg = FindRegistry("empty-prefix.com/foo:tag", registries) + reg, err = FindRegistry(nil, "empty-prefix.com/foo:tag") + assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "empty-prefix.com", reg.Prefix) assert.Equal(t, "empty-prefix.com", reg.URL) @@ -217,7 +259,8 @@ unqualified-search = true assert.Nil(t, err) assert.Equal(t, 4, len(registries)) - unqRegs := FindUnqualifiedSearchRegistries(registries) + unqRegs, err := FindUnqualifiedSearchRegistries(nil) + assert.Nil(t, err) assertSearchRegistryURLsEqual(t, []string{"registry-a.com", "registry-c.com", "registry-d.com"}, unqRegs) } @@ -317,11 +360,13 @@ registries = ["registry-d.com", "registry-e.com", "registry-a.com"]`) assert.Nil(t, err) assert.Equal(t, 5, len(registries)) - unqRegs := FindUnqualifiedSearchRegistries(registries) + unqRegs, err := FindUnqualifiedSearchRegistries(nil) + assert.Nil(t, err) assertSearchRegistryURLsEqual(t, []string{"registry-a.com", "registry-c.com", "registry-d.com"}, unqRegs) // check if merging works - reg := FindRegistry("registry-a.com/bar/foo/barfoo:latest", registries) + reg, err := FindRegistry(nil, "registry-a.com/bar/foo/barfoo:latest") + assert.Nil(t, err) assert.NotNil(t, reg) assert.True(t, reg.Search) assert.True(t, reg.Insecure) @@ -396,3 +441,47 @@ insecure = true`) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) } + +func TestInvalidateCache(t *testing.T) { + testConfig = []byte(` +[[registry]] +url = "registry.com" + +[[registry.mirror]] +url = "mirror-1.registry.com" + +[[registry.mirror]] +url = "mirror-2.registry.com" + + +[[registry]] +url = "blocked.registry.com" +blocked = true + + +[[registry]] +url = "insecure.registry.com" +insecure = true + + +[[registry]] +url = "untrusted.registry.com" +insecure = true`) + + ctx := &types.SystemContext{} + + configCache = make(map[string][]Registry) + registries, err := GetRegistries(ctx) + assert.Nil(t, err) + assert.Equal(t, 4, len(registries)) + assertSearchRegistryURLsEqual(t, []string{"registry.com", "blocked.registry.com", "insecure.registry.com", "untrusted.registry.com"}, registries) + + // invalidate the cache, make sure it's empty 
and reload + InvalidateCache() + assert.Equal(t, 0, len(configCache)) + + registries, err = GetRegistries(ctx) + assert.Nil(t, err) + assert.Equal(t, 4, len(registries)) + assertSearchRegistryURLsEqual(t, []string{"registry.com", "blocked.registry.com", "insecure.registry.com", "untrusted.registry.com"}, registries) +} diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple_test.go b/vendor/github.com/containers/image/signature/policy_eval_simple_test.go index 4bc26405f35..78db959b98e 100644 --- a/vendor/github.com/containers/image/signature/policy_eval_simple_test.go +++ b/vendor/github.com/containers/image/signature/policy_eval_simple_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/containers/image/docker/reference" + "github.com/containers/image/internal/testing/mocks" "github.com/containers/image/types" ) @@ -21,7 +22,7 @@ func (nameOnlyImageMock) Reference() types.ImageReference { type nameOnlyImageReferenceMock string func (ref nameOnlyImageReferenceMock) Transport() types.ImageTransport { - return nameImageTransportMock("== Transport mock") + return mocks.NameImageTransport("== Transport mock") } func (ref nameOnlyImageReferenceMock) StringWithinTransport() string { return string(ref) diff --git a/vendor/github.com/containers/image/signature/policy_eval_test.go b/vendor/github.com/containers/image/signature/policy_eval_test.go index 25c76bb7f3f..a9808ad4c50 100644 --- a/vendor/github.com/containers/image/signature/policy_eval_test.go +++ b/vendor/github.com/containers/image/signature/policy_eval_test.go @@ -9,6 +9,7 @@ import ( "github.com/containers/image/docker" "github.com/containers/image/docker/policyconfiguration" "github.com/containers/image/docker/reference" + "github.com/containers/image/internal/testing/mocks" "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/stretchr/testify/assert" @@ -71,7 +72,7 @@ type pcImageReferenceMock struct { } func (ref pcImageReferenceMock) Transport() types.ImageTransport { - return nameImageTransportMock(ref.transportName) + return mocks.NameImageTransport(ref.transportName) } func (ref pcImageReferenceMock) StringWithinTransport() string { // We use this in error messages, so sadly we must return something. 
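For orientation, a minimal caller-side sketch of the reworked sysregistriesv2 API vendored above: FindRegistry now takes a *types.SystemContext (nil is accepted, as the updated tests show) and returns an error instead of operating on a pre-parsed []Registry slice. The lookupRegistry helper below and its messages are illustrative assumptions, not part of this change; the Registry fields and function signatures are the ones appearing in the hunks above.

package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

// lookupRegistry is a hypothetical helper showing the new call shape.
// GetRegistries results are cached per configuration; InvalidateCache()
// (exercised in TestInvalidateCache above) clears that cache.
func lookupRegistry(sys *types.SystemContext, ref string) (*sysregistriesv2.Registry, error) {
	reg, err := sysregistriesv2.FindRegistry(sys, ref)
	if err != nil {
		return nil, fmt.Errorf("error loading registries configuration: %v", err)
	}
	if reg == nil {
		// No [[registry]] entry prefixes ref; callers fall back to their defaults.
		return nil, nil
	}
	fmt.Printf("%q matched prefix %q with %d mirror(s)\n", ref, reg.Prefix, len(reg.Mirrors))
	return reg, nil
}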
diff --git a/vendor/github.com/containers/image/signature/policy_reference_match_test.go b/vendor/github.com/containers/image/signature/policy_reference_match_test.go index 9fe267c7b60..66ac28ebf20 100644 --- a/vendor/github.com/containers/image/signature/policy_reference_match_test.go +++ b/vendor/github.com/containers/image/signature/policy_reference_match_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/containers/image/docker/reference" + "github.com/containers/image/internal/testing/mocks" "github.com/containers/image/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -15,7 +16,7 @@ const ( fullRHELRef = "registry.access.redhat.com/rhel7/rhel:7.2.3" untaggedRHELRef = "registry.access.redhat.com/rhel7/rhel" digestSuffix = "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - digestSuffixOther = "@sha256:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + digestSuffixOther = "@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" ) func TestParseImageAndDockerReference(t *testing.T) { @@ -77,7 +78,7 @@ type refImageReferenceMock struct{ reference.Named } func (ref refImageReferenceMock) Transport() types.ImageTransport { // We use this in error messages, so sady we must return something. But right now we do so only when DockerReference is nil, so restrict to that. if ref.Named == nil { - return nameImageTransportMock("== Transport mock") + return mocks.NameImageTransport("== Transport mock") } panic("unexpected call to a mock function") } @@ -110,19 +111,6 @@ func (ref refImageReferenceMock) DeleteImage(ctx context.Context, sys *types.Sys panic("unexpected call to a mock function") } -// nameImageTransportMock is a mock of types.ImageTransport which returns itself in Name. 
-type nameImageTransportMock string - -func (name nameImageTransportMock) Name() string { - return string(name) -} -func (name nameImageTransportMock) ParseReference(reference string) (types.ImageReference, error) { - panic("unexpected call to a mock function") -} -func (name nameImageTransportMock) ValidatePolicyConfigurationScope(scope string) error { - panic("unexpected call to a mock function") -} - type prmSymmetricTableTest struct { refA, refB string result bool diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index d1b010a766d..b53fbdf6e79 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -11,11 +11,13 @@ import ( "io/ioutil" "os" "path/filepath" + "sync" "sync/atomic" "github.com/containers/image/image" "github.com/containers/image/internal/tmpdir" "github.com/containers/image/manifest" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" @@ -46,6 +48,7 @@ type storageImageSource struct { image *storage.Image layerPosition map[digest.Digest]int // Where we are in reading a blob's layers cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } @@ -55,6 +58,7 @@ type storageImageDestination struct { nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs manifest []byte // Manifest contents, temporary signatures []byte // Signature contents, temporary + putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them @@ -90,17 +94,24 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) { } // Reference returns the image reference that we used to find this image. -func (s storageImageSource) Reference() types.ImageReference { +func (s *storageImageSource) Reference() types.ImageReference { return s.imageRef } // Close cleans up any resources we tied up while reading the image. -func (s storageImageSource) Close() error { +func (s *storageImageSource) Close() error { return nil } -// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *storageImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { if info.Digest == image.GzippedEmptyLayerDigest { return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } @@ -134,8 +145,10 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC // Step through the list of matching layers. Tests may want to verify that if we have multiple layers // which claim to have the same contents, that we actually do have multiple layers, otherwise we could // just go ahead and use the first one every time. + s.getBlobMutex.Lock() i := s.layerPosition[info.Digest] s.layerPosition[info.Digest] = i + 1 + s.getBlobMutex.Unlock() if len(layers) > 0 { layer = layers[i%len(layers)] } @@ -297,7 +310,7 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (s storageImageDestination) Reference() types.ImageReference { +func (s *storageImageDestination) Reference() types.ImageReference { return s.imageRef } @@ -306,7 +319,7 @@ func (s *storageImageDestination) Close() error { return os.RemoveAll(s.directory) } -func (s storageImageDestination) DesiredLayerCompression() types.LayerCompression { +func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression { // We ultimately have to decompress layers to populate trees on disk, // so callers shouldn't bother compressing them before handing them to // us, if they're not already compressed. @@ -317,9 +330,22 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) } -// PutBlob stores a layer or data blob in our temporary directory, checking that any information -// in the blobinfo matches the incoming data. -func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + // Stores a layer or data blob in our temporary directory, checking that any information + // in the blobinfo matches the incoming data. 
errorBlobInfo := types.BlobInfo{ Digest: "", Size: -1, @@ -359,9 +385,11 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, return errorBlobInfo, ErrBlobSizeMismatch } // Record information about the blob. + s.putBlobMutex.Lock() s.blobDiffIDs[hasher.Digest()] = diffID.Digest() s.fileSizes[hasher.Digest()] = counter.Count s.filenames[hasher.Digest()] = filename + s.putBlobMutex.Unlock() blobDigest := blobinfo.Digest if blobDigest.Validate() != nil { blobDigest = hasher.Digest() @@ -370,6 +398,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, if blobSize < 0 { blobSize = counter.Count } + // This is safe because we have just computed both values ourselves. + cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) return types.BlobInfo{ Digest: blobDigest, Size: blobSize, @@ -377,59 +407,85 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, }, nil } -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be -// reapplied using ReapplyBlob. -// -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) { +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. +// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + // lock the entire method as it executes fairly quickly + s.putBlobMutex.Lock() + defer s.putBlobMutex.Unlock() if blobinfo.Digest == "" { - return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) + return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } if err := blobinfo.Digest.Validate(); err != nil { - return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) + return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`) } + // Check if we've already cached it in a file. if size, ok := s.fileSizes[blobinfo.Digest]; ok { - return true, size, nil + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: size, + MediaType: blobinfo.MediaType, + }, nil } + // Check if we have a wasn't-compressed layer in storage that's based on that blob. 
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) + return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) } if len(layers) > 0 { // Save this for completeness. s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, layers[0].UncompressedSize, nil + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil } + // Check if we have a was-compressed layer in storage that's based on that blob. layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) + return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) } if len(layers) > 0 { // Record the uncompressed value so that we can use it to calculate layer IDs. s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, layers[0].CompressedSize, nil + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].CompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + + // Does the blob correspond to a known DiffID which we already have available? + // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the + // uncompressed layer, and that can happen only if canSubstitute. + if canSubstitute { + if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest) + } + if len(layers) > 0 { + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + } } - // Nope, we don't have it. - return false, -1, nil -} -// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the -// same one when it walks the list in the manifest. -func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) { - present, size, err := s.HasBlob(ctx, blobinfo) - if !present { - return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) - } - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) - } - blobinfo.Size = size - return blobinfo, nil + // Nope, we don't have it. + return false, types.BlobInfo{}, nil } // computeID computes a recommended image ID based on information we have so far. If @@ -514,8 +570,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { if !haveDiffID { // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), // or to even check if we had it. 
+ // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // that relies on using a blob digest that has never been seeen by the store had better call + // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only + // so far we are going to accommodate that (if we should be doing that at all). logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - has, _, err := s.HasBlob(ctx, blob.BlobInfo) + has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false) if err != nil { return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) } diff --git a/vendor/github.com/containers/image/storage/storage_test.go b/vendor/github.com/containers/image/storage/storage_test.go index 9705d7b2e21..c696c82ef41 100644 --- a/vendor/github.com/containers/image/storage/storage_test.go +++ b/vendor/github.com/containers/image/storage/storage_test.go @@ -18,6 +18,7 @@ import ( "testing" "time" + "github.com/containers/image/pkg/blobinfocache" "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" @@ -353,6 +354,7 @@ func TestWriteRead(t *testing.T) { if ref == nil { t.Fatalf("ParseReference returned nil reference") } + cache := blobinfocache.NewMemoryCache() for _, manifestFmt := range manifests { dest, err := ref.NewImageDestination(context.Background(), systemContext()) @@ -378,11 +380,11 @@ func TestWriteRead(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination: %v", err) } t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", digest, size, decompressedSize) - if _, err := dest.PutBlob(context.Background(), bytes.NewBufferString(config), configInfo, false); err != nil { + if _, err := dest.PutBlob(context.Background(), bytes.NewBufferString(config), configInfo, cache, false); err != nil { t.Fatalf("Error saving config to destination: %v", err) } manifest := strings.Replace(manifestFmt, "%lh", digest.String(), -1) @@ -479,7 +481,7 @@ func TestWriteRead(t *testing.T) { } for _, layerInfo := range layerInfos { buf := bytes.Buffer{} - layer, size, err := src.GetBlob(context.Background(), layerInfo) + layer, size, err := src.GetBlob(context.Background(), layerInfo, cache) if err != nil { t.Fatalf("Error reading layer %q from %q", layerInfo.Digest, ref.StringWithinTransport()) } @@ -518,6 +520,7 @@ func TestDuplicateName(t *testing.T) { } newStore(t) + cache := blobinfocache.NewMemoryCache() ref, err := Transport.ParseReference("test") if err != nil { @@ -538,7 +541,7 @@ func TestDuplicateName(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) } manifest := fmt.Sprintf(` @@ -573,7 +576,7 @@ func TestDuplicateName(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: int64(size), Digest: digest, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) } manifest = fmt.Sprintf(` @@ -604,6 +607,7 @@ func TestDuplicateID(t *testing.T) { } newStore(t) + 
cache := blobinfocache.NewMemoryCache() ref, err := Transport.ParseReference("@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") if err != nil { @@ -624,7 +628,7 @@ func TestDuplicateID(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) } manifest := fmt.Sprintf(` @@ -659,7 +663,7 @@ func TestDuplicateID(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: int64(size), Digest: digest, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) } manifest = fmt.Sprintf(` @@ -693,6 +697,7 @@ func TestDuplicateNameID(t *testing.T) { } newStore(t) + cache := blobinfocache.NewMemoryCache() ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") if err != nil { @@ -713,7 +718,7 @@ func TestDuplicateNameID(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) } manifest := fmt.Sprintf(` @@ -748,7 +753,7 @@ func TestDuplicateNameID(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: int64(size), Digest: digest, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) } manifest = fmt.Sprintf(` @@ -828,6 +833,7 @@ func TestSize(t *testing.T) { } newStore(t) + cache := blobinfocache.NewMemoryCache() ref, err := Transport.ParseReference("test") if err != nil { @@ -844,21 +850,21 @@ func TestSize(t *testing.T) { if dest == nil { t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) } - if _, err := dest.PutBlob(context.Background(), bytes.NewBufferString(config), configInfo, false); err != nil { + if _, err := dest.PutBlob(context.Background(), bytes.NewBufferString(config), configInfo, cache, false); err != nil { t.Fatalf("Error saving config to destination: %v", err) } digest1, usize1, size1, blob := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size1, Digest: digest1, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 1 to destination: %v", err) } digest2, usize2, size2, blob := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size2, Digest: digest2, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 2 to destination: %v", err) } manifest := fmt.Sprintf(` @@ -919,6 +925,7 @@ func TestDuplicateBlob(t *testing.T) { } newStore(t) + cache := blobinfocache.NewMemoryCache() ref, err := Transport.ParseReference("test") if err != nil { @@ -939,26 +946,26 @@ func TestDuplicateBlob(t *testing.T) { if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob1), types.BlobInfo{ Size: size1, Digest: digest1, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 1 to 
destination (first copy): %v", err) } digest2, _, size2, blob2 := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob2), types.BlobInfo{ Size: size2, Digest: digest2, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 2 to destination (first copy): %v", err) } if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob1), types.BlobInfo{ Size: size1, Digest: digest1, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 1 to destination (second copy): %v", err) } if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob2), types.BlobInfo{ Size: size2, Digest: digest2, - }, false); err != nil { + }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 2 to destination (second copy): %v", err) } manifest := fmt.Sprintf(` diff --git a/vendor/github.com/containers/image/storage/storage_transport_test.go b/vendor/github.com/containers/image/storage/storage_transport_test.go index 857fe884d28..b2c119e2db3 100644 --- a/vendor/github.com/containers/image/storage/storage_transport_test.go +++ b/vendor/github.com/containers/image/storage/storage_transport_test.go @@ -33,7 +33,7 @@ func TestTransportParseStoreReference(t *testing.T) { {"[unterminated", "", ""}, // Unterminated store specifier {"[garbage]busybox", "docker.io/library/busybox:latest", ""}, // Store specifier is overridden by the store we pass to ParseStoreReference - {"UPPERCASEISINVALID", "", ""}, // Invalid single-component name + {"UPPERCASEISINVALID", "", ""}, // Invalid single-component name {"sha256:" + sha256digestHex, "docker.io/library/sha256:" + sha256digestHex, ""}, // Valid single-component name; the hex part is not an ID unless it has a "@" prefix, so it looks like a tag // FIXME: This test is now incorrect, this should not fail _if the image ID matches_ {sha256digestHex, "", ""}, // Invalid single-component ID; not an ID without a "@" prefix, so it's parsed as a name, but names aren't allowed to look like IDs @@ -45,10 +45,10 @@ func TestTransportParseStoreReference(t *testing.T) { {"busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, explicit tag {"docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, everything explicit - {"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@digestOrID - {"busybox@ab", "", ""}, // Invalid ID in name@digestOrID - {"busybox@", "", ""}, // Empty ID in name@digestOrID - {"busybox@sha256:ab", "", ""}, // Invalid digest in name@digestOrID + {"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@digestOrID + {"busybox@ab", "", ""}, // Invalid ID in name@digestOrID + {"busybox@", "", ""}, // Empty ID in name@digestOrID + {"busybox@sha256:ab", "", ""}, // Invalid digest in name@digestOrID {"busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, ""}, // Valid name@digest, no tag {"busybox@" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid name@ID, implicit tag // "busybox@aaaa", a valid image ID prefix, untested @@ -56,9 +56,9 @@ func TestTransportParseStoreReference(t *testing.T) { {"docker.io/library/busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid name@ID, everything explicit {"docker.io/library/busybox:notlatest@" + sha256Digest2, 
"docker.io/library/busybox:notlatest@" + sha256Digest2, ""}, // Valid name:tag@digest, everything explicit - {"busybox@sha256:" + sha256digestHex + "@ab", "", ""}, // Invalid ID in name@digest@ID - {"busybox@ab@" + sha256digestHex, "", ""}, // Invalid digest in name@digest@ID - {"busybox@@" + sha256digestHex, "", ""}, // Invalid digest in name@digest@ID + {"busybox@sha256:" + sha256digestHex + "@ab", "", ""}, // Invalid ID in name@digest@ID + {"busybox@ab@" + sha256digestHex, "", ""}, // Invalid digest in name@digest@ID + {"busybox@@" + sha256digestHex, "", ""}, // Invalid digest in name@digest@ID {"busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, sha256digestHex}, // name@digest@ID {"docker.io/library/busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, sha256digestHex}, // name@digest@ID, everything explicit {"docker.io/library/busybox:notlatest@sha256:" + sha256digestHex + "@" + sha256digestHex, "docker.io/library/busybox:notlatest@sha256:" + sha256digestHex, sha256digestHex}, // name:tag@digest@ID, everything explicit diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go index 17af60b30c6..76e3e755ff9 100644 --- a/vendor/github.com/containers/image/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/tarball/tarball_src.go @@ -2,7 +2,6 @@ package tarball import ( "bytes" - "compress/gzip" "context" "encoding/json" "fmt" @@ -14,7 +13,7 @@ import ( "time" "github.com/containers/image/types" - + "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -77,7 +76,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System // Set up to digest the file after we maybe decompress it. diffIDdigester := digest.Canonical.Digester() - uncompressed, err := gzip.NewReader(reader) + uncompressed, err := pgzip.NewReader(reader) if err == nil { // It is compressed, so the diffID is the digest of the uncompressed version reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) @@ -207,7 +206,15 @@ func (is *tarballImageSource) Close() error { return nil } -func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) { +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (is *tarballImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { // We should only be asked about things in the manifest. Maybe the configuration blob. 
if blobinfo.Digest == is.configID { return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 5d05b711a30..9fdab2314a4 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -100,6 +100,82 @@ type BlobInfo struct { MediaType string } +// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. +// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data aboud blobs keyed by (scope, digest). +// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable. +// +// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different +// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, +// at least by not failing hard when encountering unknown data. +type BICTransportScope struct { + Opaque string +} + +// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope. +// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob +// can look it up using BlobInfoCache.CandidateLocations. +// +// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different +// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, +// at least by not failing hard when encountering unknown data. +type BICLocationReference struct { + Opaque string +} + +// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations. +type BICReplacementCandidate struct { + Digest digest.Digest + Location BICLocationReference +} + +// BlobInfoCache records data useful for reusing blobs, or substituing equivalent ones, to avoid unnecessary blob copies. +// +// It records two kinds of data: +// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: +// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. +// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompresssion), +// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/ +// +// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known +// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value). +// +// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently +// compress/decompress blobs for their own purposes. +// +// - Known blob locations, managed by individual transports: +// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob), +// recording transport-specific information that allows the transport to reuse the blob in the future; +// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused. 
+// +// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in, the docker/distribution case, blobs +// can be directly reused within a registry, or mounted across registries within a registry server.) +// +// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal; +// users of the cahce should just fall back to copying the blobs the usual way. +type BlobInfoCache interface { + // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. + // May return anyDigest if it is known to be uncompressed. + // Returns "" if nothing is known about the digest (it may be compressed or uncompressed). + UncompressedDigest(anyDigest digest.Digest) digest.Digest + // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. + // It’s allowed for anyDigest == uncompressed. + // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. + // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. + // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) + RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) + + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, + // and can be reused given the opaque location data. + RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference) + // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused + // within the specified (transport scope) (if they still exist, which is not guaranteed). + // + // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, + // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same + // uncompressed digest. + CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate +} + // ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). // This is primarily useful for copying images around; for examining their properties, Image (below) // is usually more useful. @@ -120,7 +196,10 @@ type ImageSource interface { GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error) + // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool // GetSignatures returns the image's signatures. It may use a remote (= slow) service. 
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list @@ -148,8 +227,7 @@ const ( // ImageDestination is a service, possibly remote (= slow), to store components of a single image. // // There is a specific required order for some of the calls: -// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) -// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest +// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) // PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) // Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. // @@ -183,17 +261,21 @@ type ImageDestination interface { // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. + // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. - PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error) - // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. - // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. - // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); - // it returns a non-nil error only on an unexpected failure. - HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error) - // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree. - ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error) + PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error) + // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. + HasThreadSafePutBlob() bool + // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination + // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). + // info.Digest must not be empty. + // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. 
+ // If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. + // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. + // May use and/or update cache. + TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) // PutManifest writes manifest to the destination. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), @@ -324,6 +406,30 @@ type DockerAuthConfig struct { Password string } +// OptionalBool is a boolean with an additional undefined value, which is meant +// to be used in the context of user input to distinguish between a +// user-specified value and a default value. +type OptionalBool byte + +const ( + // OptionalBoolUndefined indicates that the OptionalBoolean hasn't been written. + OptionalBoolUndefined OptionalBool = iota + // OptionalBoolTrue represents the boolean true. + OptionalBoolTrue + // OptionalBoolFalse represents the boolean false. + OptionalBoolFalse +) + +// NewOptionalBool converts the input bool into either OptionalBoolTrue or +// OptionalBoolFalse. The function is meant to avoid boilerplate code of users. +func NewOptionalBool(b bool) OptionalBool { + o := OptionalBoolFalse + if b == true { + o = OptionalBoolTrue + } + return o +} + // SystemContext allows parameterizing access to implicitly-accessed resources, // like configuration files in /etc and users' login state in their home directory. // Various components can share the same field only if their semantics is exactly @@ -351,6 +457,8 @@ type SystemContext struct { ArchitectureChoice string // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. OSChoice string + // If not "", overrides the system's default directory containing a blob info cache. + BlobInfoCacheDir string // Additional tags when creating or copying a docker-archive. DockerArchiveAdditionalTags []reference.NamedTagged @@ -376,7 +484,7 @@ type SystemContext struct { // Ignored if DockerCertPath is non-empty. DockerPerHostCertDirPath string // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. - DockerInsecureSkipTLSVerify bool + DockerInsecureSkipTLSVerify OptionalBool // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials DockerAuthConfig *DockerAuthConfig // if not "", an User-Agent header is added to each request when contacting a registry. 
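To make the new ImageDestination contract above concrete, here is a hedged caller-side sketch of how a BlobInfoCache threads through TryReusingBlob and PutBlob. NewMemoryCache and the method signatures come from the hunks in this diff (types.go and the updated storage tests); the uploadLayer/example helpers, their arguments, and the control flow are illustrative assumptions only — in practice the generic copy code performs this wiring.

package main

import (
	"context"
	"io"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

// uploadLayer is a hypothetical helper: reuse the blob if the destination already
// has it (or, with canSubstitute, an equivalent uncompressed variant), otherwise
// upload the stream.
func uploadLayer(ctx context.Context, dest types.ImageDestination, stream io.Reader, info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return reusedInfo, nil
	}
	// PutBlob may record locally verified digest/DiffID pairs in the cache,
	// as the storage destination above now does.
	return dest.PutBlob(ctx, stream, info, cache, false)
}

// example shows the typical setup seen in the updated storage tests:
// create one cache and pass it to every blob operation of a copy.
func example(ctx context.Context, dest types.ImageDestination, layerStream io.Reader, info types.BlobInfo) (types.BlobInfo, error) {
	cache := blobinfocache.NewMemoryCache()
	return uploadLayer(ctx, dest, layerStream, info, cache)
}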
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index de6dcbecf87..bbbb1a45844 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -16,7 +16,7 @@ github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 -github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3 github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 @@ -26,8 +26,9 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 github.com/vbatts/tar-split v0.10.2 golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8 golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe +golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 -gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678 +gopkg.in/cheggaaa/pb.v1 v1.0.27 gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6 github.com/xeipuuv/gojsonschema master @@ -36,10 +37,14 @@ github.com/xeipuuv/gojsonpointer master github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 -github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 +github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 github.com/pquerna/ffjson master github.com/syndtr/gocapability master github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175 github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a github.com/ulikunitz/xz v0.5.4 +github.com/boltdb/bolt master +github.com/klauspost/pgzip v1.2.1 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 diff --git a/vendor/github.com/containers/storage/.travis.yml b/vendor/github.com/containers/storage/.travis.yml index 1dbe0ceecf1..c786fd4f531 100644 --- a/vendor/github.com/containers/storage/.travis.yml +++ b/vendor/github.com/containers/storage/.travis.yml @@ -11,48 +11,44 @@ services: - docker env: - global: - - TRAVIS_ENV="-e TRAVIS=$TRAVIS - -e CI=$CI - -e TRAVIS_COMMIT=$TRAVIS_COMMIT - -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE - -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG - -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST - -e TRAVIS_PULL_REQUEST_SHA=$TRAVIS_PULL_REQUEST_SHA - -e TRAVIS_PULL_REQUEST_SLUG=$TRAVIS_PULL_REQUEST_SLUG - -e TRAVIS_BRANCH=$TRAVIS_BRANCH - -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID - -e TRAVIS_BUILD_DIR=$TRAVIS_BUILD_DIR" - matrix: - # Ubuntu - - GO_VERSION="stable" - DISTRO="ubuntu" - - - GO_VERSION="1.8" - DISTRO="ubuntu" - - - GO_VERSION="1.9" - DISTRO="ubuntu" - - # Fedora - - GO_VERSION="stable" - DISTRO="fedora" - - - GO_VERSION="1.9" - DISTRO="fedora" - - - GO_VERSION="1.8" - DISTRO="fedora" - - # CentOS - - GO_VERSION="stable" - DISTRO="centos" - - - GO_VERSION="1.9" - DISTRO="centos" - - - GO_VERSION="1.8" - DISTRO="centos" + # Ubuntu + - GO_VERSION="stable" + DISTRO="ubuntu" + + - 
GO_VERSION="1.11" + DISTRO="ubuntu" + + - GO_VERSION="1.10" + DISTRO="ubuntu" + + - GO_VERSION="1.9" + DISTRO="ubuntu" + + # Fedora + - GO_VERSION="stable" + DISTRO="fedora" + + - GO_VERSION="1.11" + DISTRO="fedora" + + - GO_VERSION="1.10" + DISTRO="fedora" + + - GO_VERSION="1.9" + DISTRO="fedora" + + # CentOS + - GO_VERSION="stable" + DISTRO="centos" + + - GO_VERSION="1.11" + DISTRO="centos" + + - GO_VERSION="1.10" + DISTRO="centos" + + - GO_VERSION="1.9" + DISTRO="centos" # GO_VERSION="stable" builds successfully, but tests fail on all platforms. # Run the tests, but ignore the result (for now) @@ -68,6 +64,17 @@ before_install: script: - echo "Travis/host environment:" + - export TRAVIS_ENV="-e TRAVIS=$TRAVIS + -e CI=$CI + -e TRAVIS_COMMIT=$TRAVIS_COMMIT + -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE + -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG + -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST + -e TRAVIS_PULL_REQUEST_SHA=$TRAVIS_PULL_REQUEST_SHA + -e TRAVIS_PULL_REQUEST_SLUG=$TRAVIS_PULL_REQUEST_SLUG + -e TRAVIS_BRANCH=$TRAVIS_BRANCH + -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID + -e TRAVIS_BUILD_DIR=$TRAVIS_BUILD_DIR" - env - echo - echo "Running tests in SPC using ./hack/run_ci_tests.sh" diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index f1f91430cb1..6f3dd2f4825 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -0.1-dev +1.5-dev diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 0a125331d80..beaf41f39c8 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -147,6 +147,13 @@ func (c *Container) ProcessLabel() string { return "" } +func (c *Container) MountOpts() []string { + if mountOpts, ok := c.Flags["MountOpts"].([]string); ok { + return mountOpts + } + return nil +} + func (r *containerStore) Containers() ([]Container, error) { containers := make([]Container, len(r.containers)) for i := range r.containers { @@ -293,6 +300,9 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat if _, idInUse := r.byid[id]; idInUse { return nil, ErrDuplicateID } + if options.MountOpts != nil { + options.Flags["MountOpts"] = append([]string{}, options.MountOpts...) + } names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go index 6e83808d4f2..aef6becfe04 100644 --- a/vendor/github.com/containers/storage/containers_ffjson.go +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -1,6 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. // source: containers.go -// package storage diff --git a/vendor/github.com/containers/storage/docs/containers-storage-mount.md b/vendor/github.com/containers/storage/docs/containers-storage-mount.md index 0c00aff3bd6..85559f27f86 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-mount.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-mount.md @@ -19,4 +19,5 @@ Specify an SELinux context for the mounted layer. 
**containers-storage mount my-container** ## SEE ALSO +containers-storage-mounted(1) containers-storage-unmount(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-mounted.md b/vendor/github.com/containers/storage/docs/containers-storage-mounted.md new file mode 100644 index 00000000000..204ac28edbc --- /dev/null +++ b/vendor/github.com/containers/storage/docs/containers-storage-mounted.md @@ -0,0 +1,17 @@ +## containers-storage-mounted 1 "November 2018" + +## NAME +containers-storage mounted - Check if a file system is mounted + +## SYNOPSIS +**containers-storage** **mounted** *LayerOrContainerNameOrID* + +## DESCRIPTION +Check if a filesystem is mounted + +## EXAMPLE +**containers-storage mounted my-container** + +## SEE ALSO +containers-storage-mount(1) +containers-storage-unmount(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-unmount.md b/vendor/github.com/containers/storage/docs/containers-storage-unmount.md index de56fced9b5..ef427a59488 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-unmount.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-unmount.md @@ -11,7 +11,9 @@ Unmounts a layer or a container's layer from the host's filesystem. ## EXAMPLE **containers-storage unmount my-container** + **containers-storage unmount /var/lib/containers/storage/mounts/my-container** ## SEE ALSO containers-storage-mount(1) +containers-storage-mounted(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage.conf.5.md b/vendor/github.com/containers/storage/docs/containers-storage.conf.5.md index 7ac3f98e2d0..dd0fd41499f 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage.conf.5.md +++ b/vendor/github.com/containers/storage/docs/containers-storage.conf.5.md @@ -39,8 +39,10 @@ The `storage` table supports the following options: **driver**="" container storage driver (default: "overlay") Default Copy On Write (COW) container storage driver + Valid drivers are "overlay", "vfs", "devmapper", "aufs", "btrfs", and "zfs" + Some drivers (for example, "zfs", "btrfs", and "aufs") may not work if your kernel lacks support for the filesystem -### STORAGE OPTIONS TABLE +### STORAGE OPTIONS TABLE The `storage.options` table supports the following options: diff --git a/vendor/github.com/containers/storage/docs/containers-storage.md b/vendor/github.com/containers/storage/docs/containers-storage.md index 1a5c1ea69ac..f7f443ced61 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage.md +++ b/vendor/github.com/containers/storage/docs/containers-storage.md @@ -53,37 +53,71 @@ configuration will be stored as data items. 
## SUB-COMMANDS The *containers-storage* command's features are broken down into several subcommands: **containers-storage add-names(1)** Add layer, image, or container name or names + **containers-storage applydiff(1)** Apply a diff to a layer + **containers-storage changes(1)** Compare two layers + **containers-storage container(1)** Examine a container + **containers-storage containers(1)** List containers + **containers-storage create-container(1)** Create a new container from an image + **containers-storage create-image(1)** Create a new image using layers + **containers-storage create-layer(1)** Create a new layer + **containers-storage delete(1)** Delete a layer or image or container, with no safety checks + **containers-storage delete-container(1)** Delete a container, with safety checks + **containers-storage delete-image(1)** Delete an image, with safety checks + **containers-storage delete-layer(1)** Delete a layer, with safety checks + **containers-storage diff(1)** Compare two layers + **containers-storage diffsize(1)** Compare two layers + **containers-storage exists(1)** Check if a layer or image or container exists + **containers-storage get-container-data(1)** Get data that is attached to a container + **containers-storage get-image-data(1)** Get data that is attached to an image + **containers-storage image(1)** Examine an image + **containers-storage images(1)** List images + **containers-storage layers(1)** List layers + **containers-storage list-container-data(1)** List data items that are attached to a container + **containers-storage list-image-data(1)** List data items that are attached to an image + **containers-storage metadata(1)** Retrieve layer, image, or container metadata + **containers-storage mount(1)** Mount a layer or container + + **containers-storage mounted(1)** Check if a file system is mounted + **containers-storage set-container-data(1)** Set data that is attached to a container + **containers-storage set-image-data(1)** Set data that is attached to an image + **containers-storage set-metadata(1)** Set layer, image, or container metadata + **containers-storage set-names(1)** Set layer, image, or container name or names + **containers-storage shutdown(1)** Shut down graph driver + **containers-storage status(1)** Check on graph driver status + **containers-storage unmount(1)** Unmount a layer or container + **containers-storage version(1)** Return containers-storage version information + **containers-storage wipe(1)** Wipe all layers, images, and containers ## OPTIONS diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index 474c7574d23..ca69816be05 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -441,7 +441,7 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data if len(parents) > 0 { - if err := a.mount(id, m, options.MountLabel, parents); err != nil { + if err := a.mount(id, m, parents, options); err != nil { return "", err } } @@ -585,7 +585,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) { return layers, nil } -func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { +func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error { a.Lock() defer 
a.Unlock() @@ -596,7 +596,7 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str rw := a.getDiffPath(id) - if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + if err := a.aufsMount(layers, rw, target, options); err != nil { return fmt.Errorf("error creating aufs mount to %s: %v", target, err) } return nil @@ -643,7 +643,7 @@ func (a *Driver) Cleanup() error { return mountpk.Unmount(a.root) } -func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { +func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) { defer func() { if err != nil { Unmount(target) @@ -657,7 +657,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro if useDirperm() { offset += len(",dirperm1") } - b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) index := 0 @@ -670,21 +670,25 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro } opts := "dio,xino=/dev/shm/aufs.xino" - if a.mountOptions != "" { - opts += fmt.Sprintf(",%s", a.mountOptions) + mountOptions := a.mountOptions + if len(options.Options) > 0 { + mountOptions = strings.Join(options.Options, ",") + } + if mountOptions != "" { + opts += fmt.Sprintf(",%s", mountOptions) } if useDirperm() { opts += ",dirperm1" } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel) if err = mount("none", target, "aufs", 0, data); err != nil { return } for ; index < len(ro); index++ { layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel) if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { return } diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index adc34d209dd..567cda9d3ca 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -640,6 +640,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { if err != nil { return "", err } + if len(options.Options) > 0 { + return "", fmt.Errorf("btrfs driver does not support mount options") + } if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) diff --git a/vendor/github.com/containers/storage/drivers/copy/copy.go b/vendor/github.com/containers/storage/drivers/copy/copy.go new file mode 100644 index 00000000000..2617824c537 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/copy/copy.go @@ -0,0 +1,277 @@ +// +build linux + +package copy + +/* +#include + +#ifndef FICLONE +#define FICLONE _IOW(0x94, 9, int) +#endif +*/ +import "C" +import ( + "container/list" + "fmt" + "io" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// Mode indicates whether to use hardlink or copy content +type Mode int + +const ( + // Content creates a new file, and copies the content of the 
file + Content Mode = iota + // Hardlink creates a new hardlink to the existing file + Hardlink +) + +func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) + if err != nil { + return err + } + defer dstFile.Close() + + if *copyWithFileClone { + _, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd()) + if err == nil { + return nil + } + + *copyWithFileClone = false + if err == unix.EXDEV { + *copyWithFileRange = false + } + } + if *copyWithFileRange { + err = doCopyWithFileRange(srcFile, dstFile, fileinfo) + // Trying the file_clone may not have caught the exdev case + // as the ioctl may not have been available (therefore EINVAL) + if err == unix.EXDEV || err == unix.ENOSYS { + *copyWithFileRange = false + } else { + return err + } + } + return legacyCopy(srcFile, dstFile) +} + +func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error { + amountLeftToCopy := fileinfo.Size() + + for amountLeftToCopy > 0 { + n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0) + if err != nil { + return err + } + + amountLeftToCopy = amountLeftToCopy - int64(n) + } + + return nil +} + +func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { + _, err := pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +type fileID struct { + dev uint64 + ino uint64 +} + +type dirMtimeInfo struct { + dstPath *string + stat *syscall.Stat_t +} + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling xattrs, and soft links +// +// Copying xattrs can be opted out of by passing false for copyXattrs. 
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { + copyWithFileRange := true + copyWithFileClone := true + + // This is a map of source file inodes to dst file paths + copiedFiles := make(map[fileID]string) + + dirsToSetMtimes := list.New() + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + id := fileID{dev: stat.Dev, ino: stat.Ino} + if copyMode == Hardlink { + isHardlink = true + if err2 := os.Link(srcPath, dstPath); err2 != nil { + return err2 + } + } else if hardLinkDstPath, ok := copiedFiles[id]; ok { + if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { + return err2 + } + } else { + if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil { + return err2 + } + copiedFiles[id] = dstPath + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("unknown file type for %s", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if copyXattrs { + if err := doCopyXattrs(srcPath, dstPath); err != nil { + return err + } + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink.
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + // nolint: unconvert + if f.IsDir() { + dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) + } else if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { + mtimeInfo := e.Value.(*dirMtimeInfo) + ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} + if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { + return err + } + } + + return nil +} + +func doCopyXattrs(srcPath, dstPath string) error { + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") +} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_test.go b/vendor/github.com/containers/storage/drivers/copy/copy_test.go new file mode 100644 index 00000000000..80e0a28c4fe --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/copy/copy_test.go @@ -0,0 +1,159 @@ +// +build linux + +package copy + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCopy(t *testing.T) { + copyWithFileRange := true + copyWithFileClone := true + doCopyTest(t, &copyWithFileRange, &copyWithFileClone) +} + +func TestCopyWithoutRange(t *testing.T) { + copyWithFileRange := false + copyWithFileClone := false + doCopyTest(t, &copyWithFileRange, &copyWithFileClone) +} + +func TestCopyDir(t *testing.T) { + srcDir, err := ioutil.TempDir("", "srcDir") + assert.NilError(t, err) + populateSrcDir(t, srcDir, 3) + + dstDir, err := ioutil.TempDir("", "testdst") + assert.NilError(t, err) + defer os.RemoveAll(dstDir) + + assert.Check(t, DirCopy(srcDir, dstDir, Content, false)) + assert.NilError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + assert.NilError(t, err) + if relPath == "." { + return nil + } + + dstPath := filepath.Join(dstDir, relPath) + assert.NilError(t, err) + + // If we add non-regular dirs and files to the test + // then we need to add more checks here.
+ dstFileInfo, err := os.Lstat(dstPath) + assert.NilError(t, err) + + srcFileSys := f.Sys().(*syscall.Stat_t) + dstFileSys := dstFileInfo.Sys().(*syscall.Stat_t) + + t.Log(relPath) + if srcFileSys.Dev == dstFileSys.Dev { + assert.Check(t, srcFileSys.Ino != dstFileSys.Ino) + } + // Todo: check size, and ctim is not equal + /// on filesystems that have granular ctimes + assert.Check(t, is.DeepEqual(srcFileSys.Mode, dstFileSys.Mode)) + assert.Check(t, is.DeepEqual(srcFileSys.Uid, dstFileSys.Uid)) + assert.Check(t, is.DeepEqual(srcFileSys.Gid, dstFileSys.Gid)) + assert.Check(t, is.DeepEqual(srcFileSys.Mtim, dstFileSys.Mtim)) + + return nil + })) +} + +func randomMode(baseMode int) os.FileMode { + for i := 0; i < 7; i++ { + baseMode = baseMode | (1&rand.Intn(2))< 0 { + addNouuid := strings.Contains("nouuid", mountOptions) + mountOptions = strings.Join(moptions.Options, ",") + if addNouuid { + mountOptions = fmt.Sprintf("nouuid,%s", mountOptions) + } + } + + options = joinMountOptions(options, mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256))) diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index 9fc082d7d74..39a4fbe2c0e 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -9,8 +9,6 @@ import ( "path" "strconv" - "github.com/sirupsen/logrus" - "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/devicemapper" "github.com/containers/storage/pkg/idtools" @@ -18,6 +16,7 @@ import ( "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" + "github.com/sirupsen/logrus" ) func init() { @@ -189,7 +188,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, options.MountLabel); err != nil { + if err := d.DeviceSet.MountDevice(id, mp, options); err != nil { d.ctr.Decrement(mp) return "", err } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index 4569c7b59ab..476b55160e5 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -49,6 +49,7 @@ type MountOpts struct { // UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point UidMaps []idtools.IDMap GidMaps []idtools.IDMap + Options []string } // InitFunc initializes the storage driver. 
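The driver.go hunk just above is the pivot of this containers/storage bump: graphdriver.MountOpts gains an Options []string field, and the store threads a container's recorded mount options down to each driver's Get(). A minimal sketch of how a caller holding a graphdriver.Driver might use the new field follows; the mountWithOptions helper name and the "ro"/"nosuid" values are illustrative assumptions for this example, not part of the vendored change.

```go
// Sketch only: shows a caller passing per-mount options through the new
// graphdriver.MountOpts.Options field. The helper name and the specific
// options are assumptions made for illustration.
package example

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

// mountWithOptions mounts a layer with caller-supplied mount options in
// addition to the usual SELinux mount label.
func mountWithOptions(driver graphdriver.Driver, layerID, mountLabel string) (string, error) {
	opts := graphdriver.MountOpts{
		MountLabel: mountLabel,
		Options:    []string{"ro", "nosuid"}, // forwarded to the underlying mount
	}
	dir, err := driver.Get(layerID, opts)
	if err != nil {
		return "", fmt.Errorf("mounting layer %s with options %v: %v", layerID, opts.Options, err)
	}
	return dir, nil
}
```

As the surrounding hunks show, drivers that cannot honor extra options in this bump (vfs, btrfs, windows) fail the Get() call rather than silently ignoring them, which is why the error path above matters.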
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index 19da7d101a8..ab2c41e5878 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -8,6 +8,7 @@ import ( "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/sirupsen/logrus" ) @@ -167,7 +168,9 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin } defer driver.Put(id) - options := &archive.TarOptions{} + options := &archive.TarOptions{ + InUserNS: rsystem.RunningInUserNS(), + } if applyMappings != nil { options.UIDMaps = applyMappings.UIDs() options.GIDMaps = applyMappings.GIDs() diff --git a/vendor/github.com/containers/storage/drivers/graphtest/testutil.go b/vendor/github.com/containers/storage/drivers/graphtest/testutil.go index 82f59cba21e..87991fb6538 100644 --- a/vendor/github.com/containers/storage/drivers/graphtest/testutil.go +++ b/vendor/github.com/containers/storage/drivers/graphtest/testutil.go @@ -166,11 +166,20 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64 switch j % 3 { // Update file case 0: + var originalFileInfo, updatedFileInfo os.FileInfo change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) change.Kind = archive.ChangeModify - if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + if originalFileInfo, err = os.Stat(path.Join(root, change.Path)); err != nil { return nil, err } + for updatedFileInfo == nil || updatedFileInfo.ModTime().Equal(originalFileInfo.ModTime()) { + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + if updatedFileInfo, err = os.Stat(path.Join(root, change.Path)); err != nil { + return nil, err + } + } // Add file case 1: change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index b7e15a7f69b..2455b673609 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" "sync" + "syscall" "github.com/containers/storage/drivers" "github.com/containers/storage/drivers/overlayutils" @@ -29,6 +30,7 @@ import ( "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" + rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -164,6 +166,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap if err != nil { os.Remove(filepath.Join(home, linkDir)) os.Remove(home) + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return nil, err + } return nil, errors.Wrap(err, "kernel does not support overlay fs") } } @@ -284,6 +290,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI exec.Command("modprobe", "overlay").Run() layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + patherr, ok := err.(*os.PathError) + if ok 
&& patherr.Err == syscall.ENOSPC { + return false, err + } + } if err == nil { // Check if reading the directory's contents populates the d_type field, which is required // for proper operation of the overlay filesystem. @@ -743,7 +755,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO workDir := path.Join(dir, "work") opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir) - if d.options.mountOptions != "" { + if len(options.Options) > 0 { + opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts) + } else if d.options.mountOptions != "" { opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts) } mountData := label.FormatMountLabel(opts, options.MountLabel) @@ -874,6 +888,7 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str UIDMaps: idMappings.UIDs(), GIDMaps: idMappings.GIDs(), WhiteoutFormat: d.getWhiteoutFormat(), + InUserNS: rsystem.RunningInUserNS(), }); err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go new file mode 100644 index 00000000000..8137fcf67b6 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go @@ -0,0 +1,7 @@ +package vfs + +import "github.com/containers/storage/drivers/copy" + +func dirCopy(srcDir, dstDir string) error { + return copy.DirCopy(srcDir, dstDir, copy.Content, false) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go new file mode 100644 index 00000000000..8ac80ee1dba --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package vfs // import "github.com/containers/storage/drivers/vfs" + +import "github.com/containers/storage/pkg/chrootarchive" + +func dirCopy(srcDir, dstDir string) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index d10fb260769..f7f3c75ba0c 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/system" @@ -15,8 +14,8 @@ import ( ) var ( - // CopyWithTar defines the copy method to use. - CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar + // CopyDir defines the copy method to use. + CopyDir = dirCopy ) func init() { @@ -141,7 +140,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool if err != nil { return fmt.Errorf("%s: %s", parent, err) } - if err := CopyWithTar(parentDir, dir); err != nil { + if err := dirCopy(parentDir, dir); err != nil { return err } } @@ -181,6 +180,9 @@ func (d *Driver) Remove(id string) error { // Get returns the directory for the given id. 
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { dir := d.dir(id) + if len(options.Options) > 0 { + return "", fmt.Errorf("vfs driver does not support mount options") + } if st, err := os.Stat(dir); err != nil { return "", err } else if !st.IsDir() { diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 4ccf657dcee..c6d86a4ab53 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -367,6 +367,9 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel) var dir string + if len(options.Options) > 0 { + return "", fmt.Errorf("windows driver does not support mount options") + } rID, err := d.resolveID(id) if err != nil { return "", err diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index 4e27a0a6f0b..c3ce6e86928 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -366,8 +366,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { return mountpoint, nil } + mountOptions := d.options.mountOptions + if len(options.Options) > 0 { + mountOptions = strings.Join(options.Options, ",") + } + filesystem := d.zfsPath(id) - opts := label.FormatMountLabel(d.options.mountOptions, options.MountLabel) + opts := label.FormatMountLabel(mountOptions, options.MountLabel) logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, opts) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) diff --git a/vendor/github.com/containers/storage/hack/run_ci_tests.sh b/vendor/github.com/containers/storage/hack/run_ci_tests.sh index a501dfd860c..1b41f07d706 100755 --- a/vendor/github.com/containers/storage/hack/run_ci_tests.sh +++ b/vendor/github.com/containers/storage/hack/run_ci_tests.sh @@ -11,6 +11,7 @@ DISTRO="${DISTRO:-ubuntu}" FQIN="docker.io/cevich/travis_${DISTRO}:latest" # This can take a while, start it going as early as possible +sudo docker info sudo docker pull $FQIN & REPO_DIR=$(realpath "$(dirname $0)/../") # assume parent directory of 'hack' diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 6c8f59b8b2d..299d2f81880 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -2,7 +2,6 @@ package storage import ( "bytes" - "compress/gzip" "encoding/json" "fmt" "io" @@ -20,6 +19,7 @@ import ( "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/truncindex" + "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" @@ -28,9 +28,8 @@ import ( ) const ( - tarSplitSuffix = ".tar-split.gz" - incompleteFlag = "incomplete" - compressionFlag = "diff-compression" + tarSplitSuffix = ".tar-split.gz" + incompleteFlag = "incomplete" ) // A Layer is a record of a copy-on-write layer that's stored by the lower @@ -1056,7 +1055,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, } defer tsfile.Close() - decompressor, err := gzip.NewReader(tsfile) + 
decompressor, err := pgzip.NewReader(tsfile) if err != nil { return nil, err } @@ -1117,9 +1116,9 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter) tsdata := bytes.Buffer{} - compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed) + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) if err != nil { - compressor = gzip.NewWriter(&tsdata) + compressor = pgzip.NewWriter(&tsdata) } metadata := storage.NewJSONPacker(compressor) uncompressed, err := archive.DecompressStream(defragmented) diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go index 09b5d0f33e5..125b5d8c971 100644 --- a/vendor/github.com/containers/storage/layers_ffjson.go +++ b/vendor/github.com/containers/storage/layers_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. -// source: ./layers.go +// source: layers.go package storage diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 4c43826256d..228d8bb8272 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -5,7 +5,6 @@ import ( "bufio" "bytes" "compress/bzip2" - "compress/gzip" "fmt" "io" "io/ioutil" @@ -22,6 +21,8 @@ import ( "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/promise" "github.com/containers/storage/pkg/system" + gzip "github.com/klauspost/pgzip" + rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/sirupsen/logrus" ) @@ -498,6 +499,8 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Gid = ta.ChownOpts.GID } + maybeTruncateHeaderModTime(hdr) + if ta.WhiteoutConverter != nil { wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) if err != nil { @@ -639,11 +642,12 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP { + if err == syscall.ENOTSUP || (err == syscall.EPERM && inUserns) { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still - // bail. + // bail. We also ignore EPERM errors if we are running in a + // user namespace. 
errors = append(errors, err.Error()) continue } @@ -1054,6 +1058,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { GIDMaps: tarMappings.GIDs(), Compression: Uncompressed, CopyPass: true, + InUserNS: rsystem.RunningInUserNS(), } archive, err := TarWithOptions(src, options) if err != nil { @@ -1068,6 +1073,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { UIDMaps: untarMappings.UIDs(), GIDMaps: untarMappings.GIDs(), ChownOpts: archiver.ChownOpts, + InUserNS: rsystem.RunningInUserNS(), } return archiver.Untar(archive, dst, options) } @@ -1087,6 +1093,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error { UIDMaps: untarMappings.UIDs(), GIDMaps: untarMappings.GIDs(), ChownOpts: archiver.ChownOpts, + InUserNS: rsystem.RunningInUserNS(), } return archiver.Untar(archive, dst, options) } @@ -1186,6 +1193,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { UIDMaps: archiver.UntarIDMappings.UIDs(), GIDMaps: archiver.UntarIDMappings.GIDs(), ChownOpts: archiver.ChownOpts, + InUserNS: rsystem.RunningInUserNS(), } err = archiver.Untar(r, filepath.Dir(dst), options) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go index 22b8b48cc10..7bc44a5665e 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_110.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -4,8 +4,19 @@ package archive import ( "archive/tar" + "time" ) func copyPassHeader(hdr *tar.Header) { hdr.Format = tar.FormatPAX } + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go index d10d595fa8b..d19811fdbca 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_19.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -8,3 +8,6 @@ import ( func copyPassHeader(hdr *tar.Header) { } + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go index 9b8103e4da5..2ba44b85d73 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. 
-// source: pkg/archive/archive.go +// source: ./pkg/archive/archive.go package archive diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index b9fa228e667..dde8d44d308 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -9,6 +9,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" + rsystem "github.com/opencontainers/runc/libcontainer/system" ) // NewArchiver returns a new Archiver which uses chrootarchive.Untar @@ -52,6 +53,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions } if options == nil { options = &archive.TarOptions{} + options.InUserNS = rsystem.RunningInUserNS() } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 2042128e3fb..5877c3b0688 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -21,6 +21,7 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringutils" digest "github.com/opencontainers/go-digest" @@ -501,6 +502,7 @@ type ContainerOptions struct { IDMappingOptions LabelOpts []string Flags map[string]interface{} + MountOpts []string } type store struct { @@ -1068,7 +1070,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, read } mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc) if err != nil { - return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", parentLayer.ID) + return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", layer.ID) } if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { @@ -2144,21 +2146,20 @@ func (s *store) DeleteContainer(id string) error { if err = rlstore.Delete(container.LayerID); err != nil { return err } - if err = rcstore.Delete(id); err != nil { - return err - } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) - if err = os.RemoveAll(gcpath); err != nil { - return err - } - rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) - if err = os.RemoveAll(rcpath); err != nil { - return err - } - return nil } - return ErrNotALayer + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil } } return ErrNotAContainer @@ -2279,10 +2280,14 @@ func (s *store) Version() ([][2]string, error) { func (s *store) Mount(id, mountLabel string) (string, error) { container, err := s.Container(id) - var uidMap, gidMap []idtools.IDMap + var ( + uidMap, gidMap []idtools.IDMap + mountOpts []string + ) if err == nil { uidMap, gidMap = container.UIDMap, container.GIDMap id = 
container.LayerID + mountOpts = container.MountOpts() } rlstore, err := s.LayerStore() if err != nil { @@ -2298,6 +2303,7 @@ func (s *store) Mount(id, mountLabel string) (string, error) { MountLabel: mountLabel, UidMaps: uidMap, GidMaps: gidMap, + Options: mountOpts, } return rlstore.Mount(id, options) } @@ -2986,7 +2992,8 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} { return ret } -const defaultConfigFile = "/etc/containers/storage.conf" +// DefaultConfigFile path to the system wide storage.conf file +const DefaultConfigFile = "/etc/containers/storage.conf" // ThinpoolOptionsConfig represents the "storage.options.thinpool" // TOML config table. @@ -3231,5 +3238,25 @@ func init() { DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" DefaultStoreOptions.GraphDriverName = "" - ReloadConfigurationFile(defaultConfigFile, &DefaultStoreOptions) + ReloadConfigurationFile(DefaultConfigFile, &DefaultStoreOptions) +} + +func GetDefaultMountOptions() ([]string, error) { + mountOpts := []string{ + ".mountopt", + fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName), + } + for _, option := range DefaultStoreOptions.GraphDriverOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + for _, m := range mountOpts { + if m == key { + return strings.Split(val, ","), nil + } + } + } + return nil, nil } diff --git a/vendor/github.com/containers/storage/tests/helpers.bash b/vendor/github.com/containers/storage/tests/helpers.bash index 09fd9a57458..f4cf37e5a66 100755 --- a/vendor/github.com/containers/storage/tests/helpers.bash +++ b/vendor/github.com/containers/storage/tests/helpers.bash @@ -48,6 +48,7 @@ function storagewithsorting2() { populate() { # Create a base layer. run storage --debug=false create-layer + echo $output [ "$status" -eq 0 ] [ "$output" != "" ] lowerlayer="$output" diff --git a/vendor/github.com/containers/storage/tests/idmaps.bats b/vendor/github.com/containers/storage/tests/idmaps.bats index c6690a97d3f..b041f26cd76 100644 --- a/vendor/github.com/containers/storage/tests/idmaps.bats +++ b/vendor/github.com/containers/storage/tests/idmaps.bats @@ -3,6 +3,13 @@ load helpers @test "idmaps-create-apply-layer" { + case "$STORAGE_DRIVER" in + btrfs|devicemapper|overlay*|vfs|zfs) + ;; + *) + skip "not supported by driver $STORAGE_DRIVER" + ;; + esac n=5 host=2 # Create some temporary files. @@ -83,6 +90,13 @@ load helpers } @test "idmaps-create-diff-layer" { + case "$STORAGE_DRIVER" in + btrfs|devicemapper|overlay*|vfs|zfs) + ;; + *) + skip "not supported by driver $STORAGE_DRIVER" + ;; + esac n=5 host=2 # Create some temporary files. @@ -162,6 +176,13 @@ load helpers } @test "idmaps-create-container" { + case "$STORAGE_DRIVER" in + btrfs|devicemapper|overlay*|vfs|zfs) + ;; + *) + skip "not supported by driver $STORAGE_DRIVER" + ;; + esac n=5 host=2 # Create some temporary files. @@ -281,6 +302,13 @@ load helpers } @test "idmaps-parent-owners" { + case "$STORAGE_DRIVER" in + btrfs|devicemapper|overlay*|vfs|zfs) + ;; + *) + skip "not supported by driver $STORAGE_DRIVER" + ;; + esac n=5 # Create some temporary files. for i in $(seq $n) ; do @@ -335,6 +363,13 @@ load helpers } @test "idmaps-copy" { + case "$STORAGE_DRIVER" in + btrfs|devicemapper|overlay*|vfs|zfs) + ;; + *) + skip "not supported by driver $STORAGE_DRIVER" + ;; + esac n=5 host=2 # Create some temporary files. 
@@ -489,6 +524,13 @@ load helpers } @test "idmaps-create-mapped-image" { + case "$STORAGE_DRIVER" in + btrfs|devicemapper|overlay*|vfs|zfs) + ;; + *) + skip "not supported by driver $STORAGE_DRIVER" + ;; + esac n=5 host=2 # Create some temporary files. @@ -617,10 +659,10 @@ load helpers @test "idmaps-create-mapped-container" { case "$STORAGE_DRIVER" in - overlay*|vfs) + btrfs|devicemapper|overlay*|vfs|zfs) ;; - *) - skip "not supported by driver $STORAGE_DRIVER" + *) + skip "not supported by driver $STORAGE_DRIVER" ;; esac n=5 @@ -787,11 +829,11 @@ load helpers container="$output" # Mount the container. - run storage --debug=false mount $container + _TEST_FORCE_SUPPORT_SHIFTING=yes-please run storage --debug=false mount $container echo "$output" [ "$status" -eq 0 ] dir="$output" test "$(stat -c%u:%g $dir/file)" == "0:0" - run storage --debug=false unmount "$container" + _TEST_FORCE_SUPPORT_SHIFTING=yes-please run storage --debug=false unmount "$container" [ "$status" -eq 0 ] } diff --git a/vendor/github.com/containers/storage/tests/mount-layer.bats b/vendor/github.com/containers/storage/tests/mount-layer.bats index 9fbfce62b5b..2508436ec6a 100644 --- a/vendor/github.com/containers/storage/tests/mount-layer.bats +++ b/vendor/github.com/containers/storage/tests/mount-layer.bats @@ -75,6 +75,10 @@ load helpers [ "$status" -eq 0 ] [ "$output" == "" ] + # Mount the layer with nosuid + run storage --debug=false mount --option nosuid $layer + [ "$status" -ne 0 ] + # Delete the first layer run storage delete-layer $layer [ "$status" -eq 0 ] diff --git a/vendor/github.com/containers/storage/tests/test_drivers.bash b/vendor/github.com/containers/storage/tests/test_drivers.bash index 3663406bd62..2b54068517d 100755 --- a/vendor/github.com/containers/storage/tests/test_drivers.bash +++ b/vendor/github.com/containers/storage/tests/test_drivers.bash @@ -12,6 +12,11 @@ btrfs() { } devicemapper() { + for binary in pvcreate vgcreate lvcreate lvconvert lvchange thin_check ; do + if ! 
which $binary > /dev/null 2> /dev/null ; then + return 1 + fi + done pkg-config devmapper 2> /dev/null } diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index 059ae94f0ad..936321b12d9 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -9,7 +9,7 @@ github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/opencontainers/go-digest master github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07 github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd -github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 +github.com/ostreedev/ostree-go master github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 github.com/pkg/errors master github.com/pmezard/go-difflib v1.0.0 @@ -21,3 +21,8 @@ github.com/tchap/go-patricia v2.2.6 github.com/vbatts/tar-split v0.10.2 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5 +gotest.tools master +github.com/google/go-cmp master +github.com/klauspost/pgzip v1.2.1 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 diff --git a/vendor/github.com/gorilla/mux/.github/stale.yml b/vendor/github.com/gorilla/mux/.github/stale.yml new file mode 100644 index 00000000000..de8a67804bf --- /dev/null +++ b/vendor/github.com/gorilla/mux/.github/stale.yml @@ -0,0 +1,12 @@ +daysUntilStale: 60 +daysUntilClose: 7 +# Issues with these labels will never be considered stale +exemptLabels: + - v2 + - needs-review + - work-required +staleLabel: stale +markComment: > + This issue has been automatically marked as stale because it hasn't seen + a recent update. It'll be automatically closed in a few days. +closeComment: false diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 0425bb8018e..9e99a2703c8 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -88,7 +88,7 @@ r := mux.NewRouter() // Only matches if domain is "www.example.com". r.Host("www.example.com") // Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.domain.com") +r.Host("{subdomain:[a-z]+}.example.com") ``` There are several other matchers that can be added. To match path prefixes: @@ -238,13 +238,13 @@ This also works for host and query value variables: ```go r := mux.NewRouter() -r.Host("{subdomain}.domain.com"). +r.Host("{subdomain}.example.com"). Path("/articles/{category}/{id:[0-9]+}"). Queries("filter", "{filter}"). HandlerFunc(ArticleHandler). Name("article") -// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" +// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" url, err := r.Get("article").URL("subdomain", "news", "category", "technology", "id", "42", @@ -264,7 +264,7 @@ r.HeadersRegexp("Content-Type", "application/(text|json)") There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. 
For the previous route, we would do: ```go -// "http://news.domain.com/" +// "http://news.example.com/" host, err := r.Get("article").URLHost("subdomain", "news") // "/articles/technology/42" @@ -275,12 +275,12 @@ And if you use subrouters, host and path defined separately can be built as well ```go r := mux.NewRouter() -s := r.Host("{subdomain}.domain.com").Subrouter() +s := r.Host("{subdomain}.example.com").Subrouter() s.Path("/articles/{category}/{id:[0-9]+}"). HandlerFunc(ArticleHandler). Name("article") -// "http://news.domain.com/articles/technology/42" +// "http://news.example.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", "category", "technology", "id", "42") diff --git a/vendor/github.com/gorilla/mux/context.go b/vendor/github.com/gorilla/mux/context.go index 13a4601ae7c..665940a2682 100644 --- a/vendor/github.com/gorilla/mux/context.go +++ b/vendor/github.com/gorilla/mux/context.go @@ -16,7 +16,3 @@ func contextSet(r *http.Request, key, val interface{}) *http.Request { return r.WithContext(context.WithValue(r.Context(), key, val)) } - -func contextClear(r *http.Request) { - return -} diff --git a/vendor/github.com/gorilla/mux/middleware_test.go b/vendor/github.com/gorilla/mux/middleware_test.go index acf4e160b90..b708be0991a 100644 --- a/vendor/github.com/gorilla/mux/middleware_test.go +++ b/vendor/github.com/gorilla/mux/middleware_test.go @@ -375,3 +375,63 @@ func TestCORSMethodMiddleware(t *testing.T) { } } } + +func TestMiddlewareOnMultiSubrouter(t *testing.T) { + first := "first" + second := "second" + notFound := "404 not found" + + router := NewRouter() + firstSubRouter := router.PathPrefix("/").Subrouter() + secondSubRouter := router.PathPrefix("/").Subrouter() + + router.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Write([]byte(notFound)) + }) + + firstSubRouter.HandleFunc("/first", func(w http.ResponseWriter, r *http.Request) { + + }) + + secondSubRouter.HandleFunc("/second", func(w http.ResponseWriter, r *http.Request) { + + }) + + firstSubRouter.Use(func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(first)) + h.ServeHTTP(w, r) + }) + }) + + secondSubRouter.Use(func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(second)) + h.ServeHTTP(w, r) + }) + }) + + rw := NewRecorder() + req := newRequest("GET", "/first") + + router.ServeHTTP(rw, req) + if rw.Body.String() != first { + t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", first, rw.Body.String()) + } + + rw = NewRecorder() + req = newRequest("GET", "/second") + + router.ServeHTTP(rw, req) + if rw.Body.String() != second { + t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", second, rw.Body.String()) + } + + rw = NewRecorder() + req = newRequest("GET", "/second/not-exist") + + router.ServeHTTP(rw, req) + if rw.Body.String() != notFound { + t.Fatalf("Notfound handler did not run: expected %s for not-exist, (got %s)", notFound, rw.Body.String()) + } +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 4bbafa51da3..8aca972d2f6 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -22,7 +22,7 @@ var ( // NewRouter returns a new router instance. 
func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} + return &Router{namedRoutes: make(map[string]*Route)} } // Router registers routes to be matched and dispatches a handler. @@ -50,24 +50,78 @@ type Router struct { // Configurable Handler to be used when the request method does not match the route. MethodNotAllowedHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute // Routes to be matched, in order. routes []*Route + // Routes by name for URL building. namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // See Router.SkipClean(). This defines the flag for new routes. - skipClean bool + // If true, do not clear the request context after handling the request. - // This has no effect when go1.7+ is used, since the context is stored + // + // Deprecated: No effect when go1.7+ is used, since the context is stored // on the request itself. KeepContext bool - // see Router.UseEncodedPath(). This defines a flag for all routes. - useEncodedPath bool + // Slice of middlewares to be called after a match is found middlewares []middleware + + // configuration shared with `Route` + routeConf +} + +// common route configuration shared between `Router` and `Route` +type routeConf struct { + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + + // Manager for the variables from host and path. + regexp routeRegexpGroup + + // List of matchers. + matchers []matcher + + // The scheme used when building URLs. + buildScheme string + + buildVarsFunc BuildVarsFunc +} + +// returns an effective deep copy of `routeConf` +func copyRouteConf(r routeConf) routeConf { + c := r + + if r.regexp.path != nil { + c.regexp.path = copyRouteRegexp(r.regexp.path) + } + + if r.regexp.host != nil { + c.regexp.host = copyRouteRegexp(r.regexp.host) + } + + c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) + for _, q := range r.regexp.queries { + c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) + } + + c.matchers = make([]matcher, 0, len(r.matchers)) + for _, m := range r.matchers { + c.matchers = append(c.matchers, m) + } + + return c +} + +func copyRouteRegexp(r *routeRegexp) *routeRegexp { + c := *r + return &c } // Match attempts to match the given request against the router's registered routes. @@ -155,22 +209,18 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { handler = http.NotFoundHandler() } - if !r.KeepContext { - defer contextClear(req) - } - handler.ServeHTTP(w, req) } // Get returns a route registered with the given name. func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] + return r.namedRoutes[name] } // GetRoute returns a route registered with the given name. This method // was renamed to Get() and remains here for backwards compatibility. func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] + return r.namedRoutes[name] } // StrictSlash defines the trailing slash behavior for new routes. 
The initial @@ -221,51 +271,14 @@ func (r *Router) UseEncodedPath() *Router { return r } -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -func (r *Router) getBuildScheme() string { - if r.parent != nil { - return r.parent.getBuildScheme() - } - return "" -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -func (r *Router) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - return m -} - // ---------------------------------------------------------------------------- // Route factories // ---------------------------------------------------------------------------- // NewRoute registers an empty route. func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath} + // initialize a route with a copy of the parent router's configuration + route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} r.routes = append(r.routes, route) return route } diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go index 5d4027e44d6..8ad57ac8226 100644 --- a/vendor/github.com/gorilla/mux/mux_test.go +++ b/vendor/github.com/gorilla/mux/mux_test.go @@ -48,15 +48,6 @@ type routeTest struct { } func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } tests := []routeTest{ { @@ -113,7 +104,15 @@ func TestHost(t *testing.T) { path: "", shouldMatch: false, }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, + { + title: "Host route with port, match with request header", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: true, + }, { title: "Host route with port, wrong host in request header", route: new(Route).Host("aaa.bbb.ccc:1234"), @@ -123,6 +122,16 @@ func TestHost(t *testing.T) { path: "", shouldMatch: false, }, + { + title: "Host route with pattern, match with request header", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc:1{v2:(?:23|4)}"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:123"), + vars: map[string]string{"v1": "bbb", "v2": "23"}, + host: "aaa.bbb.ccc:123", + path: "", + hostTemplate: `aaa.{v1:[a-z]{3}}.ccc:1{v2:(?:23|4)}`, + shouldMatch: true, + }, { title: "Host route with pattern, match", route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), @@ -1193,7 +1202,6 @@ func TestSubRouter(t *testing.T) { subrouter3 := new(Route).PathPrefix("/foo").Subrouter() subrouter4 := 
new(Route).PathPrefix("/foo/bar").Subrouter() subrouter5 := new(Route).PathPrefix("/{category}").Subrouter() - tests := []routeTest{ { route: subrouter1.Path("/{v2:[a-z]+}"), @@ -1288,6 +1296,106 @@ func TestSubRouter(t *testing.T) { pathTemplate: `/{category}`, shouldMatch: true, }, + { + title: "Mismatch method specified on parent route", + route: new(Route).Methods("POST").PathPrefix("/foo").Subrouter().Path("/"), + request: newRequest("GET", "http://localhost/foo/"), + vars: map[string]string{}, + host: "", + path: "/foo/", + pathTemplate: `/foo/`, + shouldMatch: false, + }, + { + title: "Match method specified on parent route", + route: new(Route).Methods("POST").PathPrefix("/foo").Subrouter().Path("/"), + request: newRequest("POST", "http://localhost/foo/"), + vars: map[string]string{}, + host: "", + path: "/foo/", + pathTemplate: `/foo/`, + shouldMatch: true, + }, + { + title: "Mismatch scheme specified on parent route", + route: new(Route).Schemes("https").Subrouter().PathPrefix("/"), + request: newRequest("GET", "http://localhost/"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: false, + }, + { + title: "Match scheme specified on parent route", + route: new(Route).Schemes("http").Subrouter().PathPrefix("/"), + request: newRequest("GET", "http://localhost/"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: true, + }, + { + title: "No match header specified on parent route", + route: new(Route).Headers("X-Forwarded-Proto", "https").Subrouter().PathPrefix("/"), + request: newRequest("GET", "http://localhost/"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: false, + }, + { + title: "Header mismatch value specified on parent route", + route: new(Route).Headers("X-Forwarded-Proto", "https").Subrouter().PathPrefix("/"), + request: newRequestWithHeaders("GET", "http://localhost/", "X-Forwarded-Proto", "http"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: false, + }, + { + title: "Header match value specified on parent route", + route: new(Route).Headers("X-Forwarded-Proto", "https").Subrouter().PathPrefix("/"), + request: newRequestWithHeaders("GET", "http://localhost/", "X-Forwarded-Proto", "https"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: true, + }, + { + title: "Query specified on parent route not present", + route: new(Route).Headers("key", "foobar").Subrouter().PathPrefix("/"), + request: newRequest("GET", "http://localhost/"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: false, + }, + { + title: "Query mismatch value specified on parent route", + route: new(Route).Queries("key", "foobar").Subrouter().PathPrefix("/"), + request: newRequest("GET", "http://localhost/?key=notfoobar"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: false, + }, + { + title: "Query match value specified on subroute", + route: new(Route).Queries("key", "foobar").Subrouter().PathPrefix("/"), + request: newRequest("GET", "http://localhost/?key=foobar"), + vars: map[string]string{}, + host: "", + path: "/", + pathTemplate: `/`, + shouldMatch: true, + }, { title: "Build with scheme on parent router", route: new(Route).Schemes("ftp").Host("google.com").Subrouter().Path("/"), @@ -1512,12 +1620,16 @@ func TestWalkSingleDepth(t *testing.T) { func TestWalkNested(t *testing.T) { router := NewRouter() - g := 
router.Path("/g").Subrouter() - o := g.PathPrefix("/o").Subrouter() - r := o.PathPrefix("/r").Subrouter() - i := r.PathPrefix("/i").Subrouter() - l1 := i.PathPrefix("/l").Subrouter() - l2 := l1.PathPrefix("/l").Subrouter() + routeSubrouter := func(r *Route) (*Route, *Router) { + return r, r.Subrouter() + } + + gRoute, g := routeSubrouter(router.Path("/g")) + oRoute, o := routeSubrouter(g.PathPrefix("/o")) + rRoute, r := routeSubrouter(o.PathPrefix("/r")) + iRoute, i := routeSubrouter(r.PathPrefix("/i")) + l1Route, l1 := routeSubrouter(i.PathPrefix("/l")) + l2Route, l2 := routeSubrouter(l1.PathPrefix("/l")) l2.Path("/a") testCases := []struct { @@ -1525,12 +1637,12 @@ func TestWalkNested(t *testing.T) { ancestors []*Route }{ {"/g", []*Route{}}, - {"/g/o", []*Route{g.parent.(*Route)}}, - {"/g/o/r", []*Route{g.parent.(*Route), o.parent.(*Route)}}, - {"/g/o/r/i", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route)}}, - {"/g/o/r/i/l", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route)}}, - {"/g/o/r/i/l/l", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route), l1.parent.(*Route)}}, - {"/g/o/r/i/l/l/a", []*Route{g.parent.(*Route), o.parent.(*Route), r.parent.(*Route), i.parent.(*Route), l1.parent.(*Route), l2.parent.(*Route)}}, + {"/g/o", []*Route{gRoute}}, + {"/g/o/r", []*Route{gRoute, oRoute}}, + {"/g/o/r/i", []*Route{gRoute, oRoute, rRoute}}, + {"/g/o/r/i/l", []*Route{gRoute, oRoute, rRoute, iRoute}}, + {"/g/o/r/i/l/l", []*Route{gRoute, oRoute, rRoute, iRoute, l1Route}}, + {"/g/o/r/i/l/l/a", []*Route{gRoute, oRoute, rRoute, iRoute, l1Route, l2Route}}, } idx := 0 @@ -1563,8 +1675,8 @@ func TestWalkSubrouters(t *testing.T) { o.Methods("GET") o.Methods("PUT") - // all 4 routes should be matched, but final 2 routes do not have path templates - paths := []string{"/g", "/g/o", "", ""} + // all 4 routes should be matched + paths := []string{"/g", "/g/o", "/g/o", "/g/o"} idx := 0 err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { path := paths[idx] @@ -1745,7 +1857,11 @@ func testRoute(t *testing.T, test routeTest) { } } if query != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) + u, err := route.URL(mapToPairs(match.Vars)...) 
+ if err != nil { + t.Errorf("(%v) erred while creating url: %v", test.title, err) + return + } if query != u.RawQuery { t.Errorf("(%v) URL query not equal: expected %v, got %v", test.title, query, u.RawQuery) return @@ -2332,6 +2448,273 @@ func testMethodsSubrouter(t *testing.T, test methodsSubrouterTest) { } } +func TestSubrouterMatching(t *testing.T) { + const ( + none, stdOnly, subOnly uint8 = 0, 1 << 0, 1 << 1 + both = subOnly | stdOnly + ) + + type request struct { + Name string + Request *http.Request + Flags uint8 + } + + cases := []struct { + Name string + Standard, Subrouter func(*Router) + Requests []request + }{ + { + "pathPrefix", + func(r *Router) { + r.PathPrefix("/before").PathPrefix("/after") + }, + func(r *Router) { + r.PathPrefix("/before").Subrouter().PathPrefix("/after") + }, + []request{ + {"no match final path prefix", newRequest("GET", "/after"), none}, + {"no match parent path prefix", newRequest("GET", "/before"), none}, + {"matches append", newRequest("GET", "/before/after"), both}, + {"matches as prefix", newRequest("GET", "/before/after/1234"), both}, + }, + }, + { + "path", + func(r *Router) { + r.Path("/before").Path("/after") + }, + func(r *Router) { + r.Path("/before").Subrouter().Path("/after") + }, + []request{ + {"no match subroute path", newRequest("GET", "/after"), none}, + {"no match parent path", newRequest("GET", "/before"), none}, + {"no match as prefix", newRequest("GET", "/before/after/1234"), none}, + {"no match append", newRequest("GET", "/before/after"), none}, + }, + }, + { + "host", + func(r *Router) { + r.Host("before.com").Host("after.com") + }, + func(r *Router) { + r.Host("before.com").Subrouter().Host("after.com") + }, + []request{ + {"no match before", newRequestHost("GET", "/", "before.com"), none}, + {"no match other", newRequestHost("GET", "/", "other.com"), none}, + {"matches after", newRequestHost("GET", "/", "after.com"), none}, + }, + }, + { + "queries variant keys", + func(r *Router) { + r.Queries("foo", "bar").Queries("cricket", "baseball") + }, + func(r *Router) { + r.Queries("foo", "bar").Subrouter().Queries("cricket", "baseball") + }, + []request{ + {"matches with all", newRequest("GET", "/?foo=bar&cricket=baseball"), both}, + {"matches with more", newRequest("GET", "/?foo=bar&cricket=baseball&something=else"), both}, + {"no match with none", newRequest("GET", "/"), none}, + {"no match with some", newRequest("GET", "/?cricket=baseball"), none}, + }, + }, + { + "queries overlapping keys", + func(r *Router) { + r.Queries("foo", "bar").Queries("foo", "baz") + }, + func(r *Router) { + r.Queries("foo", "bar").Subrouter().Queries("foo", "baz") + }, + []request{ + {"no match old value", newRequest("GET", "/?foo=bar"), none}, + {"no match diff value", newRequest("GET", "/?foo=bak"), none}, + {"no match with none", newRequest("GET", "/"), none}, + {"matches override", newRequest("GET", "/?foo=baz"), none}, + }, + }, + { + "header variant keys", + func(r *Router) { + r.Headers("foo", "bar").Headers("cricket", "baseball") + }, + func(r *Router) { + r.Headers("foo", "bar").Subrouter().Headers("cricket", "baseball") + }, + []request{ + { + "matches with all", + newRequestWithHeaders("GET", "/", "foo", "bar", "cricket", "baseball"), + both, + }, + { + "matches with more", + newRequestWithHeaders("GET", "/", "foo", "bar", "cricket", "baseball", "something", "else"), + both, + }, + {"no match with none", newRequest("GET", "/"), none}, + {"no match with some", newRequestWithHeaders("GET", "/", "cricket", "baseball"), none}, + }, + }, + { + 
"header overlapping keys", + func(r *Router) { + r.Headers("foo", "bar").Headers("foo", "baz") + }, + func(r *Router) { + r.Headers("foo", "bar").Subrouter().Headers("foo", "baz") + }, + []request{ + {"no match old value", newRequestWithHeaders("GET", "/", "foo", "bar"), none}, + {"no match diff value", newRequestWithHeaders("GET", "/", "foo", "bak"), none}, + {"no match with none", newRequest("GET", "/"), none}, + {"matches override", newRequestWithHeaders("GET", "/", "foo", "baz"), none}, + }, + }, + { + "method", + func(r *Router) { + r.Methods("POST").Methods("GET") + }, + func(r *Router) { + r.Methods("POST").Subrouter().Methods("GET") + }, + []request{ + {"matches before", newRequest("POST", "/"), none}, + {"no match other", newRequest("HEAD", "/"), none}, + {"matches override", newRequest("GET", "/"), none}, + }, + }, + { + "schemes", + func(r *Router) { + r.Schemes("http").Schemes("https") + }, + func(r *Router) { + r.Schemes("http").Subrouter().Schemes("https") + }, + []request{ + {"matches overrides", newRequest("GET", "https://www.example.com/"), none}, + {"matches original", newRequest("GET", "http://www.example.com/"), none}, + {"no match other", newRequest("GET", "ftp://www.example.com/"), none}, + }, + }, + } + + // case -> request -> router + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + for _, req := range c.Requests { + t.Run(req.Name, func(t *testing.T) { + for _, v := range []struct { + Name string + Config func(*Router) + Expected bool + }{ + {"subrouter", c.Subrouter, (req.Flags & subOnly) != 0}, + {"standard", c.Standard, (req.Flags & stdOnly) != 0}, + } { + r := NewRouter() + v.Config(r) + if r.Match(req.Request, &RouteMatch{}) != v.Expected { + if v.Expected { + t.Errorf("expected %v match", v.Name) + } else { + t.Errorf("expected %v no match", v.Name) + } + } + } + }) + } + }) + } +} + +// verify that copyRouteConf copies fields as expected. 
+func Test_copyRouteConf(t *testing.T) { + var ( + m MatcherFunc = func(*http.Request, *RouteMatch) bool { + return true + } + b BuildVarsFunc = func(i map[string]string) map[string]string { + return i + } + r, _ = newRouteRegexp("hi", regexpTypeHost, routeRegexpOptions{}) + ) + + tests := []struct { + name string + args routeConf + want routeConf + }{ + { + "empty", + routeConf{}, + routeConf{}, + }, + { + "full", + routeConf{ + useEncodedPath: true, + strictSlash: true, + skipClean: true, + regexp: routeRegexpGroup{host: r, path: r, queries: []*routeRegexp{r}}, + matchers: []matcher{m}, + buildScheme: "https", + buildVarsFunc: b, + }, + routeConf{ + useEncodedPath: true, + strictSlash: true, + skipClean: true, + regexp: routeRegexpGroup{host: r, path: r, queries: []*routeRegexp{r}}, + matchers: []matcher{m}, + buildScheme: "https", + buildVarsFunc: b, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // special case some incomparable fields of routeConf before delegating to reflect.DeepEqual + got := copyRouteConf(tt.args) + + // funcs not comparable, just compare length of slices + if len(got.matchers) != len(tt.want.matchers) { + t.Errorf("matchers different lengths: %v %v", len(got.matchers), len(tt.want.matchers)) + } + got.matchers, tt.want.matchers = nil, nil + + // deep equal treats nil slice differently to empty slice so check for zero len first + { + bothZero := len(got.regexp.queries) == 0 && len(tt.want.regexp.queries) == 0 + if !bothZero && !reflect.DeepEqual(got.regexp.queries, tt.want.regexp.queries) { + t.Errorf("queries unequal: %v %v", got.regexp.queries, tt.want.regexp.queries) + } + got.regexp.queries, tt.want.regexp.queries = nil, nil + } + + // funcs not comparable, just compare nullity + if (got.buildVarsFunc == nil) != (tt.want.buildVarsFunc == nil) { + t.Errorf("build vars funcs unequal: %v %v", got.buildVarsFunc == nil, tt.want.buildVarsFunc == nil) + } + got.buildVarsFunc, tt.want.buildVarsFunc = nil, nil + + // finish the deal + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("route confs unequal: %v %v", got, tt.want) + } + }) + } +} + // mapToPairs converts a string map to a slice of string pairs func mapToPairs(m map[string]string) []string { var i int @@ -2406,3 +2789,28 @@ func newRequest(method, url string) *http.Request { } return req } + +// create a new request with the provided headers +func newRequestWithHeaders(method, url string, headers ...string) *http.Request { + req := newRequest(method, url) + + if len(headers)%2 != 0 { + panic(fmt.Sprintf("Expected headers length divisible by 2 but got %v", len(headers))) + } + + for i := 0; i < len(headers); i += 2 { + req.Header.Set(headers[i], headers[i+1]) + } + + return req +} + +// newRequestHost a new request with a method, url, and host header +func newRequestHost(method, url, host string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + req.Host = host + return req +} diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index b92d59f208e..f2528867563 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -267,7 +267,7 @@ type routeRegexpGroup struct { } // setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { +func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. 
if v.host != nil { host := getHost(req) @@ -312,17 +312,13 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } // getHost tries its best to return the request host. +// According to section 14.23 of RFC 2616 the Host header +// can include the port number if the default value of 80 is not used. func getHost(r *http.Request) string { if r.URL.IsAbs() { return r.URL.Host } - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - + return r.Host } func extractVars(input string, matches []int, names []string, output map[string]string) { diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index c8bb5c7e3a0..a1970966c91 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -15,24 +15,8 @@ import ( // Route stores information to match a request and build URLs. type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute // Request handler for the route. handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - // The scheme used when building URLs. - buildScheme string // If true, this route never matches: it is only used to build URLs. buildOnly bool // The name used to build URLs. @@ -40,7 +24,11 @@ type Route struct { // Error resulted from building a route. err error - buildVarsFunc BuildVarsFunc + // "global" reference to all named routes + namedRoutes map[string]*Route + + // config possibly passed in from `Router` + routeConf } // SkipClean reports whether path cleaning is enabled for this route via @@ -55,6 +43,11 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { return false } + // Set MatchErr to nil to prevent + // subsequent matching subrouters from failing to run middleware. + // If not reset, the middleware would see a non-nil MatchErr and be skipped, + // even when there was a matching route. + match.MatchErr = nil var matchErr error // Match everything. @@ -93,9 +86,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { } // Set variables. 
- if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } + r.regexp.setMatch(req, match, r) return true } @@ -145,7 +136,7 @@ func (r *Route) Name(name string) *Route { } if r.err == nil { r.name = name - r.getNamedRoutes()[name] = r + r.namedRoutes[name] = r } return r } @@ -177,7 +168,6 @@ func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { if r.err != nil { return r.err } - r.regexp = r.getRegexpGroup() if typ == regexpTypePath || typ == regexpTypePrefix { if len(tpl) > 0 && tpl[0] != '/' { return fmt.Errorf("mux: path must start with a slash, got %q", tpl) @@ -424,7 +414,7 @@ func (r *Route) Schemes(schemes ...string) *Route { for k, v := range schemes { schemes[k] = strings.ToLower(v) } - if r.buildScheme == "" && len(schemes) > 0 { + if len(schemes) > 0 { r.buildScheme = schemes[0] } return r.addMatcher(schemeMatcher(schemes)) @@ -439,7 +429,15 @@ type BuildVarsFunc func(map[string]string) map[string]string // BuildVarsFunc adds a custom function to be used to modify build variables // before a route's URL is built. func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - r.buildVarsFunc = f + if r.buildVarsFunc != nil { + // compose the old and new functions + old := r.buildVarsFunc + r.buildVarsFunc = func(m map[string]string) map[string]string { + return f(old(m)) + } + } else { + r.buildVarsFunc = f + } return r } @@ -458,7 +456,8 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} + // initialize a subrouter with a copy of the parent route's configuration + router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} r.addMatcher(router) return router } @@ -502,9 +501,6 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } values, err := r.prepareVars(pairs...) if err != nil { return nil, err @@ -516,8 +512,8 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) { return nil, err } scheme = "http" - if s := r.getBuildScheme(); s != "" { - scheme = s + if r.buildScheme != "" { + scheme = r.buildScheme } } if r.regexp.path != nil { @@ -547,7 +543,7 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.host == nil { + if r.regexp.host == nil { return nil, errors.New("mux: route doesn't have a host") } values, err := r.prepareVars(pairs...) @@ -562,8 +558,8 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { Scheme: "http", Host: host, } - if s := r.getBuildScheme(); s != "" { - u.Scheme = s + if r.buildScheme != "" { + u.Scheme = r.buildScheme } return u, nil } @@ -575,7 +571,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.path == nil { + if r.regexp.path == nil { return nil, errors.New("mux: route doesn't have a path") } values, err := r.prepareVars(pairs...) 
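With the parentRoute interface gone, BuildVarsFunc can no longer defer to a parent at build time, so the patched version composes functions in place: if a route already carries a build-vars function, the newly registered one wraps it. A minimal standalone sketch of that composition pattern, in plain Go rather than the mux API itself (the type and helper names here are illustrative only):

```go
package main

import "fmt"

// buildVarsFunc mirrors the mux.BuildVarsFunc signature: it rewrites the
// variable map used when a URL is built from a route template.
type buildVarsFunc func(map[string]string) map[string]string

// compose chains an existing function with a new one the way the patched
// Route.BuildVarsFunc does: the previously registered function runs first,
// and the newly added one sees its output.
func compose(prev, next buildVarsFunc) buildVarsFunc {
	if prev == nil {
		return next
	}
	return func(m map[string]string) map[string]string {
		return next(prev(m))
	}
}

func main() {
	var f buildVarsFunc
	f = compose(f, func(m map[string]string) map[string]string {
		m["lang"] = "en"
		return m
	})
	f = compose(f, func(m map[string]string) map[string]string {
		m["lang"] += "-US" // sees the first function's output
		return m
	})
	fmt.Println(f(map[string]string{})) // map[lang:en-US]
}
```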
@@ -600,7 +596,7 @@ func (r *Route) GetPathTemplate() (string, error) { if r.err != nil { return "", r.err } - if r.regexp == nil || r.regexp.path == nil { + if r.regexp.path == nil { return "", errors.New("mux: route doesn't have a path") } return r.regexp.path.template, nil @@ -614,7 +610,7 @@ func (r *Route) GetPathRegexp() (string, error) { if r.err != nil { return "", r.err } - if r.regexp == nil || r.regexp.path == nil { + if r.regexp.path == nil { return "", errors.New("mux: route does not have a path") } return r.regexp.path.regexp.String(), nil @@ -629,7 +625,7 @@ func (r *Route) GetQueriesRegexp() ([]string, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.queries == nil { + if r.regexp.queries == nil { return nil, errors.New("mux: route doesn't have queries") } var queries []string @@ -648,7 +644,7 @@ func (r *Route) GetQueriesTemplates() ([]string, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.queries == nil { + if r.regexp.queries == nil { return nil, errors.New("mux: route doesn't have queries") } var queries []string @@ -683,7 +679,7 @@ func (r *Route) GetHostTemplate() (string, error) { if r.err != nil { return "", r.err } - if r.regexp == nil || r.regexp.host == nil { + if r.regexp.host == nil { return "", errors.New("mux: route doesn't have a host") } return r.regexp.host.template, nil @@ -700,64 +696,8 @@ func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { } func (r *Route) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } if r.buildVarsFunc != nil { m = r.buildVarsFunc(m) } return m } - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getBuildScheme() string - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup - buildVars(map[string]string) map[string]string -} - -func (r *Route) getBuildScheme() string { - if r.buildScheme != "" { - return r.buildScheme - } - if r.parent != nil { - return r.parent.getBuildScheme() - } - return "" -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. 
- r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 00000000000..e1c269d34a1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1 @@ +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/compress/.travis.yml b/vendor/github.com/klauspost/compress/.travis.yml new file mode 100644 index 00000000000..00557cec707 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.travis.yml @@ -0,0 +1,27 @@ +language: go + +sudo: false + +os: + - linux + - osx + +go: + - 1.9.x + - 1.10.x + - master + +install: + - go get -t ./... + +script: + - diff <(gofmt -d .) <(printf "") + - go test -v -cpu=2 ./... + - go test -cpu=2 -tags=noasm ./... + - go test -cpu=1,2,4 -short -race ./... + - go test -cpu=2,4 -short -race -tags=noasm ./... + +matrix: + allow_failures: + - go: 'master' + fast_finish: true diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
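Taken together, the mux.go and route.go hunks above replace lazy parent-pointer lookups with an eager copy: NewRoute and Subrouter clone the caller's routeConf, so matchers declared on a parent route now travel with every subroute. A hedged sketch of the resulting behavior from a caller's point of view, using only exported calls that appear in the vendored tests (TestSubRouter asserts these same outcomes for a method declared on the parent route):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// The POST requirement is copied into the subrouter's configuration,
	// so every route registered on it inherits the method matcher.
	api := r.Methods("POST").PathPrefix("/foo").Subrouter()
	api.Path("/")

	get, _ := http.NewRequest("GET", "http://localhost/foo/", nil)
	post, _ := http.NewRequest("POST", "http://localhost/foo/", nil)

	fmt.Println(r.Match(get, &mux.RouteMatch{}))  // false: parent route's method applies
	fmt.Println(r.Match(post, &mux.RouteMatch{})) // true
}
```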
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 00000000000..280d54890cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,160 @@ +# compress + +This package is based on an optimized Deflate function, which is used by gzip/zip/zlib packages. + +It offers slightly better compression at lower compression settings, and up to 3x faster encoding at highest compression level. + +* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). +* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). +* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). +* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) + +[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). +* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. 
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. +* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +# usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | +|--------------------|-----------------------------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | +| `compress/zlib` | `github.com/klauspost/compress/zlib` | +| `archive/zip` | `github.com/klauspost/compress/zip` | +| `compress/flate` | `github.com/klauspost/compress/flate` | + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +# Performance Update 2018 + +It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. + +The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. 
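The usage table above presents these packages as drop-in replacements for the standard library; a short sketch of what that swap looks like in practice, assuming only that the gzip subpackage mirrors compress/gzip as the README states:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/gzip" // was: "compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// Same API as the standard library; only the import path changed.
	zw, err := gzip.NewWriterLevel(&buf, 5)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("hello, hello, hello, hello")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round-tripped %d bytes\n", len(out))
}
```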
+ +The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard libary at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. + +The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). + + +## Overall differences. + +There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. + +The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. + +This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. + +There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. + +## Web Content + +This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. + +Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. + +Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. + +## Object files + +This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. + +The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. + +The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. + +## Highly Compressible File + +This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. + +It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. + +So if you know you content is extremely compressible you might want to go slightly higher than the defaults. 
The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". + +## Medium-High Compressible + +This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. + +We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. + +## Medium Compressible + +I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. + +The most notable thing is how quickly the standard libary drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. + + +## Un-compressible Content + +This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. + + +# linear time compression (huffman only) + +This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. + +This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). + +Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. + +The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression raio can never be better than 8:1 (12.5%). + +The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + + +# snappy package + +The standard snappy package has now been improved. This repo contains a copy of the snappy repo. + +I would advise to use the standard package: https://github.com/golang/snappy + + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. 
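The HuffmanOnly level described above is exposed as a constant by the flate package vendored below (it appears in deflate.go alongside the usual levels); a hedged sketch of selecting it, assuming the rest of the flate API matches the standard library:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/flate" // drop-in for "compress/flate"
)

func main() {
	payload := bytes.Repeat([]byte("abcdefgh"), 4096)

	var huff, def bytes.Buffer

	// HuffmanOnly disables match searching: near linear-time encoding,
	// at the cost of a ratio that can never beat 8:1.
	hw, err := flate.NewWriter(&huff, flate.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}
	hw.Write(payload)
	hw.Close()

	// Default level for comparison.
	dw, err := flate.NewWriter(&def, flate.DefaultCompression)
	if err != nil {
		log.Fatal(err)
	}
	dw.Write(payload)
	dw.Close()

	fmt.Printf("huffman-only: %d bytes, default: %d bytes\n", huff.Len(), def.Len())
}
```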
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 00000000000..59bf962b8ba --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,63 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} diff --git a/vendor/github.com/klauspost/compress/compressible_test.go b/vendor/github.com/klauspost/compress/compressible_test.go new file mode 100644 index 00000000000..af508dfcb86 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible_test.go @@ -0,0 +1,121 @@ +package compress + +import ( + "crypto/rand" + "encoding/base32" + "testing" +) + +func BenchmarkEstimate(b *testing.B) { + b.ReportAllocs() + // (predictable, low entropy distibution) + b.Run("zeroes-5k", func(b *testing.B) { + var testData = make([]byte, 5000) + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + b.Log(Estimate(testData)) + }) + + // (predictable, high entropy distibution) + b.Run("predictable-5k", func(b *testing.B) { + var testData = make([]byte, 5000) + for i := range testData { + testData[i] = byte(float64(i) / float64(len(testData)) * 256) + } + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + b.Log(Estimate(testData)) + }) + + // (not predictable, high entropy distibution) + b.Run("random-500b", func(b *testing.B) { + var testData = make([]byte, 500) + rand.Read(testData) + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + b.Log(Estimate(testData)) + }) + + // (not predictable, high entropy distibution) + b.Run("random-5k", func(b *testing.B) { + var testData = make([]byte, 5000) + rand.Read(testData) + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + b.Log(Estimate(testData)) + }) + + // (not predictable, high entropy distibution) + b.Run("random-50k", func(b *testing.B) { + var testData = make([]byte, 50000) + rand.Read(testData) + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + 
b.Log(Estimate(testData)) + }) + + // (not predictable, high entropy distibution) + b.Run("random-500k", func(b *testing.B) { + var testData = make([]byte, 500000) + rand.Read(testData) + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + b.Log(Estimate(testData)) + }) + + // (not predictable, medium entropy distibution) + b.Run("base-32-5k", func(b *testing.B) { + var testData = make([]byte, 5000) + rand.Read(testData) + s := base32.StdEncoding.EncodeToString(testData) + testData = []byte(s) + testData = testData[:5000] + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + b.Log(Estimate(testData)) + }) + // (medium predictable, medium entropy distibution) + b.Run("text", func(b *testing.B) { + var testData = []byte(`If compression is done per-chunk, care should be taken that it doesn't leave restic backups open to watermarking/fingerprinting attacks. +This is essentially the same problem we discussed related to fingerprinting the CDC deduplication process: +With "naive" CDC, a "known plaintext" file can be verified to exist within the backup if the size of individual blocks can be observed by an attacker, by using CDC on the file in parallel and comparing the resulting amount of chunks and individual chunk lengths. +As discussed earlier, this can be somewhat mitigated by salting the CDC algorithm with a secret value, as done in attic. +With salted CDC, I assume compression would happen on each individual chunk, after splitting the problematic file into chunks. Restic chunks are in the range of 512 KB to 8 MB (but not evenly distributed - right?). +Attacker knows that the CDC algorithm uses a secret salt, so the attacker generates a range of chunks consisting of the first 512 KB to 8 MB of the file, one for each valid chunk length. The attacker is also able to determine the lengths of compressed chunks. +The attacker then compresses that chunk using the compression algorithm. +The attacker compares the lengths of the resulting chunks to the first chunk in the restic backup sets. +IF a matching block length is found, the attacker repeats the exercise with the next chunk, and the next chunk, and the next chunk, ... and the next chunk. +It is my belief that with sufficiently large files, and considering the fact that the CDC algorithm is "biased" (in lack of better of words) towards generating blocks of about 1 MB, this would be sufficient to ascertain whether or not a certain large file exists in the backup. +AS always, a paranoid and highly unscientific stream of consciousness. +Thoughts?`) + testData = append(testData, testData...) + testData = append(testData, testData...) + b.SetBytes(int64(len(testData))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Estimate(testData) + } + b.Log(Estimate(testData)) + }) +} diff --git a/vendor/github.com/klauspost/compress/flate/asm_test.go b/vendor/github.com/klauspost/compress/flate/asm_test.go new file mode 100644 index 00000000000..40bf210f5f6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/asm_test.go @@ -0,0 +1,193 @@ +// Copyright 2015, Klaus Post, see LICENSE for details. 
+ +//+build amd64 + +package flate + +import ( + "math/rand" + "testing" +) + +func TestCRC(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping CRC test, no SSE 4.2 available") + } + for _, x := range deflateTests { + y := x.out + if len(y) >= minMatchLength { + t.Logf("In: %v, Out:0x%08x", y[0:minMatchLength], crc32sse(y[0:minMatchLength])) + } + } +} + +func TestCRCBulk(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping CRC test, no SSE 4.2 available") + } + for _, x := range deflateTests { + y := x.out + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + if !testing.Short() { + y = append(y, y...) + y = append(y, y...) + } + y = append(y, 1) + if len(y) >= minMatchLength { + for j := len(y) - 1; j >= 4; j-- { + + // Create copy, so we easier detect of-of-bound reads + test := make([]byte, j) + test2 := make([]byte, j) + copy(test, y[:j]) + copy(test2, y[:j]) + + // We allocate one more than we need to test for unintentional overwrites + dst := make([]uint32, j-3+1) + ref := make([]uint32, j-3+1) + for i := range dst { + dst[i] = uint32(i + 100) + ref[i] = uint32(i + 101) + } + // Last entry must NOT be overwritten. + dst[j-3] = 0x1234 + ref[j-3] = 0x1234 + + // Do two encodes we can compare + crc32sseAll(test, dst) + crc32sseAll(test2, ref) + + // Check all values + for i, got := range dst { + if i == j-3 { + if dst[i] != 0x1234 { + t.Fatalf("end of expected dst overwritten, was %08x", uint32(dst[i])) + } + continue + } + expect := crc32sse(y[i : i+4]) + if got != expect && got == uint32(i)+100 { + t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, uint32(expect)) + } else if got != expect { + t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, uint32(got), uint32(expect)) + } + expect = ref[i] + if got != expect { + t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect) + } + } + } + } + } +} + +func TestMatchLen(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping Matchlen test, no SSE 4.2 available") + } + // Maximum length tested + var maxLen = 512 + + // Skips per iteration + is, js, ks := 3, 2, 1 + if testing.Short() { + is, js, ks = 7, 5, 3 + } + + a := make([]byte, maxLen) + b := make([]byte, maxLen) + bb := make([]byte, maxLen) + rand.Seed(1) + for i := range a { + a[i] = byte(rand.Int63()) + b[i] = byte(rand.Int63()) + } + + // Test different lengths + for i := 0; i < maxLen; i += is { + // Test different dst offsets. + for j := 0; j < maxLen-1; j += js { + copy(bb, b) + // Test different src offsets + for k := i - 1; k >= 0; k -= ks { + copy(bb[j:], a[k:i]) + maxTest := maxLen - j + if maxTest > maxLen-k { + maxTest = maxLen - k + } + got := matchLenSSE4(a[k:], bb[j:], maxTest) + expect := matchLenReference(a[k:], bb[j:], maxTest) + if got > maxTest || got < 0 { + t.Fatalf("unexpected result %d (len:%d, src offset: %d, dst offset:%d)", got, maxTest, k, j) + } + if got != expect { + t.Fatalf("Mismatch, expected %d, got %d", expect, got) + } + } + } + } +} + +// matchLenReference is a reference matcher. 
+func matchLenReference(a, b []byte, max int) int { + for i := 0; i < max; i++ { + if a[i] != b[i] { + return i + } + } + return max +} + +func TestHistogram(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping Matchlen test, no SSE 4.2 available") + } + // Maximum length tested + const maxLen = 65536 + var maxOff = 8 + + // Skips per iteration + is, js := 5, 3 + if testing.Short() { + is, js = 9, 1 + maxOff = 1 + } + + a := make([]byte, maxLen+maxOff) + rand.Seed(1) + for i := range a { + a[i] = byte(rand.Int63()) + } + + // Test different lengths + for i := 0; i <= maxLen; i += is { + // Test different offsets + for j := 0; j < maxOff; j += js { + var got [256]int32 + var reference [256]int32 + + histogram(a[j:i+j], got[:]) + histogramReference(a[j:i+j], reference[:]) + for k := range got { + if got[k] != reference[k] { + t.Fatalf("mismatch at len:%d, offset:%d, value %d: (got) %d != %d (expected)", i, j, k, got[k], reference[k]) + } + } + } + } +} + +// histogramReference is a reference +func histogramReference(b []byte, h []int32) { + if len(h) < 256 { + panic("Histogram too small") + } + for _, t := range b { + h[t]++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/copy.go b/vendor/github.com/klauspost/compress/flate/copy.go new file mode 100644 index 00000000000..a3200a8f49e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/copy.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// forwardCopy is like the built-in copy function except that it always goes +// forward from the start, even if the dst and src overlap. +// It is equivalent to: +// for i := 0; i < n; i++ { +// mem[dst+i] = mem[src+i] +// } +func forwardCopy(mem []byte, dst, src, n int) { + if dst <= src { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + for { + if dst >= src+n { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + // There is some forward overlap. The destination + // will be filled with a repeated pattern of mem[src:src+k]. + // We copy one instance of the pattern here, then repeat. + // Each time around this loop k will double. + k := dst - src + copy(mem[dst:dst+k], mem[src:src+k]) + n -= k + dst += k + } +} diff --git a/vendor/github.com/klauspost/compress/flate/copy_test.go b/vendor/github.com/klauspost/compress/flate/copy_test.go new file mode 100644 index 00000000000..2011b1547c9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/copy_test.go @@ -0,0 +1,54 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "testing" +) + +func TestForwardCopy(t *testing.T) { + testCases := []struct { + dst0, dst1 int + src0, src1 int + want string + }{ + {0, 9, 0, 9, "012345678"}, + {0, 5, 4, 9, "45678"}, + {4, 9, 0, 5, "01230"}, + {1, 6, 3, 8, "34567"}, + {3, 8, 1, 6, "12121"}, + {0, 9, 3, 6, "345"}, + {3, 6, 0, 9, "012"}, + {1, 6, 0, 9, "00000"}, + {0, 4, 7, 8, "7"}, + {0, 1, 6, 8, "6"}, + {4, 4, 6, 9, ""}, + {2, 8, 6, 6, ""}, + {0, 0, 0, 0, ""}, + } + for _, tc := range testCases { + b := []byte("0123456789") + n := tc.dst1 - tc.dst0 + if tc.src1-tc.src0 < n { + n = tc.src1 - tc.src0 + } + forwardCopy(b, tc.dst0, tc.src0, n) + got := string(b[tc.dst0 : tc.dst0+n]) + if got != tc.want { + t.Errorf("dst=b[%d:%d], src=b[%d:%d]: got %q, want %q", + tc.dst0, tc.dst1, tc.src0, tc.src1, got, tc.want) + } + // Check that the bytes outside of dst[:n] were not modified. + for i, x := range b { + if i >= tc.dst0 && i < tc.dst0+n { + continue + } + if int(x) != '0'+i { + t.Errorf("dst=b[%d:%d], src=b[%d:%d]: copy overrun at b[%d]: got '%c', want '%c'", + tc.dst0, tc.dst1, tc.src0, tc.src1, i, x, '0'+i) + } + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go new file mode 100644 index 00000000000..8298d309aef --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go @@ -0,0 +1,42 @@ +//+build !noasm +//+build !appengine +//+build !gccgo + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +import ( + "github.com/klauspost/cpuid" +) + +// crc32sse returns a hash for the first 4 bytes of the slice +// len(a) must be >= 4. +//go:noescape +func crc32sse(a []byte) uint32 + +// crc32sseAll calculates hashes for each 4-byte set in a. +// dst must be east len(a) - 4 in size. +// The size is not checked by the assembly. +//go:noescape +func crc32sseAll(a []byte, dst []uint32) + +// matchLenSSE4 returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. +// +// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions. +// +//go:noescape +func matchLenSSE4(a, b []byte, max int) int + +// histogram accumulates a histogram of b in h. +// h must be at least 256 entries in length, +// and must be cleared before calling this function. +//go:noescape +func histogram(b []byte, h []int32) + +// Detect SSE 4.2 feature. +func init() { + useSSE42 = cpuid.CPU.SSE42() +} diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s new file mode 100644 index 00000000000..a7994372702 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s @@ -0,0 +1,214 @@ +//+build !noasm +//+build !appengine +//+build !gccgo + +// Copyright 2015, Klaus Post, see LICENSE for details. 
+ +// func crc32sse(a []byte) uint32 +TEXT ·crc32sse(SB), 4, $0 + MOVQ a+0(FP), R10 + XORQ BX, BX + + // CRC32 dword (R10), EBX + BYTE $0xF2; BYTE $0x41; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0x1a + + MOVL BX, ret+24(FP) + RET + +// func crc32sseAll(a []byte, dst []uint32) +TEXT ·crc32sseAll(SB), 4, $0 + MOVQ a+0(FP), R8 // R8: src + MOVQ a_len+8(FP), R10 // input length + MOVQ dst+24(FP), R9 // R9: dst + SUBQ $4, R10 + JS end + JZ one_crc + MOVQ R10, R13 + SHRQ $2, R10 // len/4 + ANDQ $3, R13 // len&3 + XORQ BX, BX + ADDQ $1, R13 + TESTQ R10, R10 + JZ rem_loop + +crc_loop: + MOVQ (R8), R11 + XORQ BX, BX + XORQ DX, DX + XORQ DI, DI + MOVQ R11, R12 + SHRQ $8, R11 + MOVQ R12, AX + MOVQ R11, CX + SHRQ $16, R12 + SHRQ $16, R11 + MOVQ R12, SI + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + // CRC32 ECX, EDX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd1 + + // CRC32 ESI, EDI + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xfe + MOVL BX, (R9) + MOVL DX, 4(R9) + MOVL DI, 8(R9) + + XORQ BX, BX + MOVL R11, AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + MOVL BX, 12(R9) + + ADDQ $16, R9 + ADDQ $4, R8 + XORQ BX, BX + SUBQ $1, R10 + JNZ crc_loop + +rem_loop: + MOVL (R8), AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + MOVL BX, (R9) + ADDQ $4, R9 + ADDQ $1, R8 + XORQ BX, BX + SUBQ $1, R13 + JNZ rem_loop + +end: + RET + +one_crc: + MOVQ $1, R13 + XORQ BX, BX + JMP rem_loop + +// func matchLenSSE4(a, b []byte, max int) int +TEXT ·matchLenSSE4(SB), 4, $0 + MOVQ a_base+0(FP), SI + MOVQ b_base+24(FP), DI + MOVQ DI, DX + MOVQ max+48(FP), CX + +cmp8: + // As long as we are 8 or more bytes before the end of max, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ CX, $8 + JLT cmp1 + MOVQ (SI), AX + MOVQ (DI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, SI + ADDQ $8, DI + SUBQ $8, CX + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, DI + + // Subtract off &b[0] to convert from &b[ret] to ret, and return. + SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +cmp1: + // In the slices' tail, compare 1 byte at a time. + CMPQ CX, $0 + JEQ matchLenEnd + MOVB (SI), AX + MOVB (DI), BX + CMPB AX, BX + JNE matchLenEnd + ADDQ $1, SI + ADDQ $1, DI + SUBQ $1, CX + JMP cmp1 + +matchLenEnd: + // Subtract off &b[0] to convert from &b[ret] to ret, and return. 
+ SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +// func histogram(b []byte, h []int32) +TEXT ·histogram(SB), 4, $0 + MOVQ b+0(FP), SI // SI: &b + MOVQ b_len+8(FP), R9 // R9: len(b) + MOVQ h+24(FP), DI // DI: Histogram + MOVQ R9, R8 + SHRQ $3, R8 + JZ hist1 + XORQ R11, R11 + +loop_hist8: + MOVQ (SI), R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + INCL (DI)(R10*4) + + ADDQ $8, SI + DECQ R8 + JNZ loop_hist8 + +hist1: + ANDQ $7, R9 + JZ end_hist + XORQ R10, R10 + +loop_hist1: + MOVB (SI), R10 + INCL (DI)(R10*4) + INCQ SI + DECQ R9 + JNZ loop_hist1 + +end_hist: + RET diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go new file mode 100644 index 00000000000..dcf43bd50a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go @@ -0,0 +1,35 @@ +//+build !amd64 noasm appengine gccgo + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +func init() { + useSSE42 = false +} + +// crc32sse should never be called. +func crc32sse(a []byte) uint32 { + panic("no assembler") +} + +// crc32sseAll should never be called. +func crc32sseAll(a []byte, dst []uint32) { + panic("no assembler") +} + +// matchLenSSE4 should never be called. +func matchLenSSE4(a, b []byte, max int) int { + panic("no assembler") + return 0 +} + +// histogram accumulates a histogram of b in h. +// +// len(h) must be >= 256, and h's elements must be all zeroes. +func histogram(b []byte, h []int32) { + h = h[:256] + for _, t := range b { + h[t]++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 00000000000..9e6e7ff0cfe --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1353 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. 
+ + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we put into a single flat block, just too + // stop things from getting too large. + maxFlateBlockTokens = 1 << 14 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 24 + + skipNever = math.MaxInt32 +) + +var useSSE42 bool + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-4 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + // For levels 5-6 we don't bother trying with lazy matches. + // Lazy matching is at least 30% slower, with 1.5% increase. + {6, 0, 12, 8, 12, 5}, + {8, 0, 24, 16, 16, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 8, 24, 16, skipNever, 7}, + {10, 16, 24, 64, skipNever, 8}, + {32, 258, 258, 4096, skipNever, 9}, +} + +type compressor struct { + compressionLevel + + w *huffmanBitWriter + bulkHasher func([]byte, []uint32) + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + sync bool // requesting flush + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + chainHead int + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 + hashOffset int + + // input window: unprocessed data is window[index:windowEnd] + index int + window []byte + windowEnd int + blockStart int // window index where current tokens start + byteAvailable bool // if true, still need to process window[index-1]. + + // queued output tokens + tokens tokens + + // deflate state + length int + offset int + hash uint32 + maxInsertIndex int + err error + ii uint16 // position of last match, intended to overflow to reset. + + snap snappyEnc + hashMatch [maxMatchLength + minMatchLength]uint32 +} + +func (d *compressor) fillDeflate(b []byte) int { + if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + copy(d.window[:], d.window[windowSize:2*windowSize]) + d.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + d.hashOffset += windowSize + if d.hashOffset > maxHashOffset { + delta := d.hashOffset - 1 + d.hashOffset -= delta + d.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). 
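The hashHead/hashPrev pair documented in the compressor struct above forms per-bucket linked lists threaded through the window: hashHead holds the most recent position for a hash value (offset by hashOffset so that a stored zero means "empty"), and hashPrev, indexed modulo the window size, links back to the previous position with the same hash. The toy sketch below, with hypothetical positions and the initial hashOffset of 1, only illustrates that linked-list shape; it is not the vendored code.

package main

import "fmt"

const (
	windowMask = 1<<15 - 1 // windowSize - 1, as in the constants above
	hashSize   = 1 << 17
	hashOffset = 1 // stored heads are pos+hashOffset, so 0 means "empty"
)

func main() {
	hashHead := make([]uint32, hashSize)
	hashPrev := make([]uint32, windowMask+1)

	insert := func(pos int, h uint32) {
		hashPrev[pos&windowMask] = hashHead[h] // remember the old head
		hashHead[h] = uint32(pos + hashOffset) // this position is now newest
	}

	// Three positions that (hypothetically) hash to the same bucket.
	for _, pos := range []int{10, 250, 4096} {
		insert(pos, 42)
	}

	// Walk the chain newest-to-oldest, the way findMatch does.
	for i := int(hashHead[42]) - hashOffset; i >= 0; i = int(hashPrev[i&windowMask]) - hashOffset {
		fmt.Println("candidate position", i) // 4096, then 250, then 10
	}
}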
+ for i, v := range d.hashPrev[:] { + if int(v) > delta { + d.hashPrev[i] = uint32(int(v) - delta) + } else { + d.hashPrev[i] = 0 + } + } + for i, v := range d.hashHead[:] { + if int(v) > delta { + d.hashHead[i] = uint32(int(v) - delta) + } else { + d.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + d.w.writeBlock(tok.tokens[:tok.n], eof, window) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window) + } + } else { + d.w.writeBlock(tok.tokens[:tok.n], eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only mode, + // use constant or Snappy compression. + switch d.compressionLevel.level { + case 0, 1, 2: + return + } + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. + n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := d.hashMatch[:dstSize] + d.bulkHasher(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + // Update window information. + d.windowEnd += n + d.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. 
+ tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLenSSE4(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +const hashmul = 0x1e35a7bd + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < minMatchLength { + return + } + hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + dst[0] = (hb * hashmul) >> (32 - hashBits) + end := len(b) - minMatchLength + 1 + for i := 1; i < end; i++ { + hb = (hb << 8) | uint32(b[i+3]) + dst[i] = (hb * hashmul) >> (32 - hashBits) + } +} + +// matchLen returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. 
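hash4 above packs four bytes big-endian style into a word, multiplies by a fixed constant, and keeps the top hashBits bits; bulkHash4 produces the same value for every offset by rolling the packed word one byte at a time instead of repacking it. The small check below is illustrative only (it re-derives the same arithmetic locally, much as TestCRCBulkOld later in this diff exercises the real functions):

package main

import "fmt"

const (
	hashBits = 17
	hashmul  = 0x1e35a7bd
)

func hash4(b []byte) uint32 {
	return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
}

func main() {
	data := []byte("the quick brown fox")

	// Roll the packed 4-byte word forward one byte at a time, as bulkHash4
	// does, and confirm it matches hashing each 4-byte window from scratch.
	hb := uint32(data[3]) | uint32(data[2])<<8 | uint32(data[1])<<16 | uint32(data[0])<<24
	for i := 0; i+4 <= len(data); i++ {
		if i > 0 {
			hb = hb<<8 | uint32(data[i+3])
		}
		rolled := (hb * hashmul) >> (32 - hashBits)
		fmt.Println(i, rolled == hash4(data[i:])) // always true
	}
}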
+func matchLen(a, b []byte, max int) int { + a = a[:max] + b = b[:len(a)] + for i, av := range a { + if b[i] != av { + return i + } + } + return max +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.hashOffset = 1 + d.length = minMatchLength - 1 + d.offset = 0 + d.byteAvailable = false + d.index = 0 + d.hash = 0 + d.chainHead = -1 + d.bulkHasher = bulkHash4 + if useSSE42 { + d.bulkHasher = crc32sseAll + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazy +func (d *compressor) deflate() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. 
+ d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. + d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. + if d.ii > 31 { + n := int(d.ii >> 5) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazySSE +func (d *compressor) deflateSSE() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. 
+ const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. 
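The SSE paths above hash 4-byte windows with crc32sse, which uses the SSE4.2 CRC32 instruction (CRC-32C, Castagnoli). The sketch below shows the same bucketing idea with the standard library; note it is only an analogue: the vendored assembly seeds the CRC with zero and skips the usual final inversion, so the concrete values differ from crc32.Checksum, and the real code masks with hashMask exactly as shown.

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	const hashMask = 1<<17 - 1 // hashBits = 17, as in the constants above
	tab := crc32.MakeTable(crc32.Castagnoli)

	window := []byte("deflate deflate deflate")
	for i := 0; i < 3; i++ {
		bucket := crc32.Checksum(window[i:i+4], tab) & hashMask
		fmt.Printf("position %d hashes to bucket %d\n", i, bucket)
	}
}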
+ d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>5) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazySSE() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. + if d.ii > 31 { + n := int(d.ii >> 6) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +func (d *compressor) store() { + if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.windowEnd = 0 + } +} + +// fillWindow will fill the buffer with data for huffman-only compression. +// The number of bytes copied is returned. +func (d *compressor) fillBlock(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeHuff() { + if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { + return + } + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + d.windowEnd = 0 +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeSnappy() { + // We only compress if we have maxStoreBlockSize. 
+ if d.windowEnd < maxStoreBlockSize { + if !d.sync { + return + } + // Handle extremely small sizes. + if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.tokens.n = 0 + d.windowEnd = 0 + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + } + d.tokens.n = 0 + d.windowEnd = 0 + d.snap.Reset() + return + } + } + + d.snap.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if int(d.tokens.n) == d.windowEnd { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. + } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd]) + d.err = d.w.err + } + d.tokens.n = 0 + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + d.step(d) + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level >= 1 && level <= 4: + d.snap = newSnappy(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeSnappy + case level == DefaultCompression: + level = 5 + fallthrough + case 5 <= level && level <= 9: + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + if d.fastSkipHashing == skipNever { + if useSSE42 { + d.step = (*compressor).deflateLazySSE + } else { + d.step = (*compressor).deflateLazy + } + } else { + if useSSE42 { + d.step = (*compressor).deflateSSE + } else { + d.step = (*compressor).deflate + + } + } + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.snap != nil { + d.snap.Reset() + d.windowEnd = 0 + d.tokens.n = 0 + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. 
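storeSnappy above picks the cheapest block encoding from the token count: if no bytes were removed the block is stored raw, if fewer than a sixteenth were removed it is Huffman-coded only, and otherwise a dynamic Huffman block with matches is written (writeBlockSkip earlier applies a similar rule with a 1/64 threshold). The helper below is only an illustration of that decision rule, not the vendored function:

package main

import "fmt"

func blockMode(tokens, windowEnd int) string {
	switch {
	case tokens == windowEnd:
		return "stored" // no matches found, copying is cheapest
	case tokens > windowEnd-(windowEnd>>4):
		return "huffman-only" // matches removed less than 1/16th of the bytes
	default:
		return "dynamic" // worth writing matches plus a dynamic Huffman table
	}
}

func main() {
	fmt.Println(blockMode(65535, 65535)) // stored
	fmt.Println(blockMode(62000, 65535)) // huffman-only
	fmt.Println(blockMode(20000, 65535)) // dynamic
}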
+ d.windowEnd = 0 + default: + d.chainHead = -1 + for i := range d.hashHead { + d.hashHead[i] = 0 + } + for i := range d.hashPrev { + d.hashPrev[i] = 0 + } + d.hashOffset = 1 + d.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.n = 0 + d.length = minMatchLength - 1 + d.offset = 0 + d.hash = 0 + d.ii = 0 + d.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + dw := &dictWriter{w} + zw, err := NewWriter(dw, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +type dictWriter struct { + w io.Writer +} + +func (w *dictWriter) Write(b []byte) (n int, err error) { + return w.w.Write(b) +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. 
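As a quick orientation to the Writer API defined above, here is a minimal round trip through this vendored package: compress with NewWriter, then decode with the package's NewReader. This is a usage sketch, not part of the diff; in long-running code, Writer.Reset (defined just below) is the usual way to reuse the same compressor state for many outputs.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	w, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello, deflate")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	r := flate.NewReader(&buf)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	r.Close()
	fmt.Println(string(out)) // hello, deflate
}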
+func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if dw, ok := w.d.w.writer.(*dictWriter); ok { + // w was created with NewWriterDict + dw.w = dst + w.d.reset(dw) + w.d.fillWindow(w.dict) + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate_test.go b/vendor/github.com/klauspost/compress/flate/deflate_test.go new file mode 100644 index 00000000000..ff62a607afd --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate_test.go @@ -0,0 +1,648 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + "sync" + "testing" +) + +type deflateTest struct { + in []byte + level int + out []byte +} + +type deflateInflateTest struct { + in []byte +} + +type reverseBitsTest struct { + in uint16 + bitCount uint8 + out uint16 +} + +var deflateTests = []*deflateTest{ + {[]byte{}, 0, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + + {[]byte{0x11}, 0, []byte{0, 1, 0, 254, 255, 17, 1, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, 0, []byte{0, 2, 0, 253, 255, 17, 18, 1, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 0, + []byte{0, 8, 0, 247, 255, 17, 17, 17, 17, 17, 17, 17, 17, 1, 0, 0, 255, 255}, + }, + {[]byte{}, 1, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, BestCompression, []byte{18, 20, 2, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, BestCompression, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}}, + {[]byte{}, 9, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, 9, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, 9, []byte{18, 20, 2, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 9, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}}, +} + +var deflateInflateTests = []*deflateInflateTest{ + {[]byte{}}, + {[]byte{0x11}}, + {[]byte{0x11, 0x12}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}}, + {[]byte{0x11, 0x10, 0x13, 0x41, 0x21, 0x21, 0x41, 0x13, 0x87, 0x78, 0x13}}, + {largeDataChunk()}, +} + +var reverseBitsTests = []*reverseBitsTest{ + {1, 1, 1}, + {1, 2, 2}, + {1, 3, 4}, + {1, 4, 8}, + {1, 5, 16}, + {17, 5, 17}, + {257, 9, 257}, + {29, 5, 23}, +} + +func largeDataChunk() []byte { + result := make([]byte, 100000) + for i := range result { + result[i] = byte(i * i & 0xFF) + } + return result +} + +func TestCRCBulkOld(t *testing.T) { + for _, x := range deflateTests { + y := x.out + if len(y) >= minMatchLength { + y = append(y, y...) 
+ for j := 4; j < len(y); j++ { + y := y[:j] + dst := make([]uint32, len(y)-minMatchLength+1) + for i := range dst { + dst[i] = uint32(i + 100) + } + bulkHash4(y, dst) + for i, val := range dst { + got := val + expect := hash4(y[i:]) + if got != expect && got == uint32(i)+100 { + t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, expect) + } else if got != expect { + t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect) + } else { + //t.Logf("Len:%d Index:%d OK (0x%08x)", len(y), i, got) + } + } + } + } + } +} + +func TestDeflate(t *testing.T) { + for _, h := range deflateTests { + var buf bytes.Buffer + w, err := NewWriter(&buf, h.level) + if err != nil { + t.Errorf("NewWriter: %v", err) + continue + } + w.Write(h.in) + w.Close() + if !bytes.Equal(buf.Bytes(), h.out) { + t.Errorf("Deflate(%d, %x) = \n%#v, want \n%#v", h.level, h.in, buf.Bytes(), h.out) + } + } +} + +// A sparseReader returns a stream consisting of 0s followed by 1<<16 1s. +// This tests missing hash references in a very large input. +type sparseReader struct { + l int64 + cur int64 +} + +func (r *sparseReader) Read(b []byte) (n int, err error) { + if r.cur >= r.l { + return 0, io.EOF + } + n = len(b) + cur := r.cur + int64(n) + if cur > r.l { + n -= int(cur - r.l) + cur = r.l + } + for i := range b[0:n] { + if r.cur+int64(i) >= r.l-1<<16 { + b[i] = 1 + } else { + b[i] = 0 + } + } + r.cur = cur + return +} + +func TestVeryLongSparseChunk(t *testing.T) { + if testing.Short() { + t.Skip("skipping sparse chunk during short test") + } + w, err := NewWriter(ioutil.Discard, 1) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + if _, err = io.Copy(w, &sparseReader{l: 23E8}); err != nil { + t.Errorf("Compress failed: %v", err) + return + } +} + +type syncBuffer struct { + buf bytes.Buffer + mu sync.RWMutex + closed bool + ready chan bool +} + +func newSyncBuffer() *syncBuffer { + return &syncBuffer{ready: make(chan bool, 1)} +} + +func (b *syncBuffer) Read(p []byte) (n int, err error) { + for { + b.mu.RLock() + n, err = b.buf.Read(p) + b.mu.RUnlock() + if n > 0 || b.closed { + return + } + <-b.ready + } +} + +func (b *syncBuffer) signal() { + select { + case b.ready <- true: + default: + } +} + +func (b *syncBuffer) Write(p []byte) (n int, err error) { + n, err = b.buf.Write(p) + b.signal() + return +} + +func (b *syncBuffer) WriteMode() { + b.mu.Lock() +} + +func (b *syncBuffer) ReadMode() { + b.mu.Unlock() + b.signal() +} + +func (b *syncBuffer) Close() error { + b.closed = true + b.signal() + return nil +} + +func testSync(t *testing.T, level int, input []byte, name string) { + if len(input) == 0 { + return + } + + t.Logf("--testSync %d, %d, %s", level, len(input), name) + buf := newSyncBuffer() + buf1 := new(bytes.Buffer) + buf.WriteMode() + w, err := NewWriter(io.MultiWriter(buf, buf1), level) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + r := NewReader(buf) + + // Write half the input and read back. 
+ for i := 0; i < 2; i++ { + var lo, hi int + if i == 0 { + lo, hi = 0, (len(input)+1)/2 + } else { + lo, hi = (len(input)+1)/2, len(input) + } + t.Logf("#%d: write %d-%d", i, lo, hi) + if _, err := w.Write(input[lo:hi]); err != nil { + t.Errorf("testSync: write: %v", err) + return + } + if i == 0 { + if err := w.Flush(); err != nil { + t.Errorf("testSync: flush: %v", err) + return + } + } else { + if err := w.Close(); err != nil { + t.Errorf("testSync: close: %v", err) + } + } + buf.ReadMode() + out := make([]byte, hi-lo+1) + m, err := io.ReadAtLeast(r, out, hi-lo) + t.Logf("#%d: read %d", i, m) + if m != hi-lo || err != nil { + t.Errorf("testSync/%d (%d, %d, %s): read %d: %d, %v (%d left)", i, level, len(input), name, hi-lo, m, err, buf.buf.Len()) + return + } + if !bytes.Equal(input[lo:hi], out[:hi-lo]) { + t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo]) + return + } + // This test originally checked that after reading + // the first half of the input, there was nothing left + // in the read buffer (buf.buf.Len() != 0) but that is + // not necessarily the case: the write Flush may emit + // some extra framing bits that are not necessary + // to process to obtain the first half of the uncompressed + // data. The test ran correctly most of the time, because + // the background goroutine had usually read even + // those extra bits by now, but it's not a useful thing to + // check. + buf.WriteMode() + } + buf.ReadMode() + out := make([]byte, 10) + if n, err := r.Read(out); n > 0 || err != io.EOF { + t.Errorf("testSync (%d, %d, %s): final Read: %d, %v (hex: %x)", level, len(input), name, n, err, out[0:n]) + } + if buf.buf.Len() != 0 { + t.Errorf("testSync (%d, %d, %s): extra data at end", level, len(input), name) + } + r.Close() + + // stream should work for ordinary reader too + r = NewReader(buf1) + out, err = ioutil.ReadAll(r) + if err != nil { + t.Errorf("testSync: read: %s", err) + return + } + r.Close() + if !bytes.Equal(input, out) { + t.Errorf("testSync: decompress(compress(data)) != data: level=%d input=%s", level, name) + } +} + +func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) { + var buffer bytes.Buffer + w, err := NewWriter(&buffer, level) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + w.Write(input) + w.Close() + if limit > 0 && buffer.Len() > limit { + t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit) + return + } + if limit > 0 { + t.Logf("level: %d - Size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len()) + } + r := NewReader(&buffer) + out, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("read: %s", err) + return + } + r.Close() + if !bytes.Equal(input, out) { + t.Errorf("decompress(compress(data)) != data: level=%d input=%s", level, name) + return + } + testSync(t, level, input, name) +} + +func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int) { + for i := 0; i < 10; i++ { + testToFromWithLevelAndLimit(t, i, input, name, limit[i]) + } + testToFromWithLevelAndLimit(t, -2, input, name, limit[10]) +} + +func TestDeflateInflate(t *testing.T) { + for i, h := range deflateInflateTests { + testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{}) + } +} + +func TestReverseBits(t *testing.T) { + for _, h := range reverseBitsTests { + if v := reverseBits(h.in, h.bitCount); v != h.out { + t.Errorf("reverseBits(%v,%v) = %v, want %v", + h.in, h.bitCount, v, h.out) + } + } +} + +type 
deflateInflateStringTest struct { + filename string + label string + limit [11]int // Number 11 is ConstantCompression +} + +var deflateInflateStringTests = []deflateInflateStringTest{ + { + "../testdata/e.txt", + "2.718281828...", + [...]int{100018, 67900, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790, 43683 + 100}, + }, + { + "../testdata/Mark.Twain-Tom.Sawyer.txt", + "Mark.Twain-Tom.Sawyer", + [...]int{387999, 185000, 182361, 179974, 174124, 168819, 162936, 160506, 160295, 160295, 233460 + 100}, + }, +} + +func TestDeflateInflateString(t *testing.T) { + for _, test := range deflateInflateStringTests { + gold, err := ioutil.ReadFile(test.filename) + if err != nil { + t.Error(err) + } + // Remove returns that may be present on Windows + neutral := strings.Map(func(r rune) rune { + if r != '\r' { + return r + } + return -1 + }, string(gold)) + + testToFromWithLimit(t, []byte(neutral), test.label, test.limit) + + if testing.Short() { + break + } + } +} + +func TestReaderDict(t *testing.T) { + const ( + dict = "hello world" + text = "hello again world" + ) + var b bytes.Buffer + w, err := NewWriter(&b, 5) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + w.Write([]byte(dict)) + w.Flush() + b.Reset() + w.Write([]byte(text)) + w.Close() + + r := NewReaderDict(&b, []byte(dict)) + data, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(data) != "hello again world" { + t.Fatalf("read returned %q want %q", string(data), text) + } +} + +func TestWriterDict(t *testing.T) { + const ( + dict = "hello world Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua." + text = "hello world Lorem ipsum dolor sit amet" + ) + // This test is sensitive to algorithm changes that skip + // data in favour of speed. Higher levels are less prone to this + // so we test level 4-9. + for l := 4; l < 9; l++ { + var b bytes.Buffer + w, err := NewWriter(&b, l) + if err != nil { + t.Fatalf("level %d, NewWriter: %v", l, err) + } + w.Write([]byte(dict)) + w.Flush() + b.Reset() + w.Write([]byte(text)) + w.Close() + + var b1 bytes.Buffer + w, _ = NewWriterDict(&b1, l, []byte(dict)) + w.Write([]byte(text)) + w.Close() + + if !bytes.Equal(b1.Bytes(), b.Bytes()) { + t.Errorf("level %d, writer wrote\n%v\n want\n%v", l, b1.Bytes(), b.Bytes()) + } + } +} + +// See http://code.google.com/p/go/issues/detail?id=2508 +func TestRegression2508(t *testing.T) { + if testing.Short() { + t.Logf("test disabled with -short") + return + } + w, err := NewWriter(ioutil.Discard, 1) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + buf := make([]byte, 1024) + for i := 0; i < 131072; i++ { + if _, err := w.Write(buf); err != nil { + t.Fatalf("writer failed: %v", err) + } + } + w.Close() +} + +func TestWriterReset(t *testing.T) { + for level := -2; level <= 9; level++ { + if level == -1 { + level++ + } + if testing.Short() && level > 1 { + break + } + w, err := NewWriter(ioutil.Discard, level) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + buf := []byte("hello world") + for i := 0; i < 1024; i++ { + w.Write(buf) + } + w.Reset(ioutil.Discard) + + wref, err := NewWriter(ioutil.Discard, level) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + + // DeepEqual doesn't compare functions. + w.d.fill, wref.d.fill = nil, nil + w.d.step, wref.d.step = nil, nil + w.d.bulkHasher, wref.d.bulkHasher = nil, nil + w.d.snap, wref.d.snap = nil, nil + + // hashMatch is always overwritten when used. 
+ copy(w.d.hashMatch[:], wref.d.hashMatch[:]) + if w.d.tokens.n != 0 { + t.Errorf("level %d Writer not reset after Reset. %d tokens were present", level, w.d.tokens.n) + } + // As long as the length is 0, we don't care about the content. + w.d.tokens = wref.d.tokens + + // We don't care if there are values in the window, as long as it is at d.index is 0 + w.d.window = wref.d.window + if !reflect.DeepEqual(w, wref) { + t.Errorf("level %d Writer not reset after Reset", level) + } + } + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, NoCompression) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, DefaultCompression) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, BestCompression) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, ConstantCompression) }) + dict := []byte("we are the world") + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, NoCompression, dict) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, DefaultCompression, dict) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, BestCompression, dict) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, ConstantCompression, dict) }) +} + +func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error)) { + buf := new(bytes.Buffer) + w, err := newWriter(buf) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + b := []byte("hello world") + for i := 0; i < 1024; i++ { + w.Write(b) + } + w.Close() + out1 := buf.Bytes() + + buf2 := new(bytes.Buffer) + w.Reset(buf2) + for i := 0; i < 1024; i++ { + w.Write(b) + } + w.Close() + out2 := buf2.Bytes() + + if len(out1) != len(out2) { + t.Errorf("got %d, expected %d bytes", len(out2), len(out1)) + } + if bytes.Compare(out1, out2) != 0 { + mm := 0 + for i, b := range out1[:len(out2)] { + if b != out2[i] { + t.Errorf("mismatch index %d: %02x, expected %02x", i, out2[i], b) + } + mm++ + if mm == 10 { + t.Fatal("Stopping") + } + } + } + t.Logf("got %d bytes", len(out1)) +} + +// TestBestSpeed tests that round-tripping through deflate and then inflate +// recovers the original input. The Write sizes are near the thresholds in the +// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize +// (65535). +func TestBestSpeed(t *testing.T) { + abc := make([]byte, 128) + for i := range abc { + abc[i] = byte(i) + } + abcabc := bytes.Repeat(abc, 131072/len(abc)) + var want []byte + + testCases := [][]int{ + {65536, 0}, + {65536, 1}, + {65536, 1, 256}, + {65536, 1, 65536}, + {65536, 14}, + {65536, 15}, + {65536, 16}, + {65536, 16, 256}, + {65536, 16, 65536}, + {65536, 127}, + {65536, 128}, + {65536, 128, 256}, + {65536, 128, 65536}, + {65536, 129}, + {65536, 65536, 256}, + {65536, 65536, 65536}, + } + + for i, tc := range testCases { + for _, firstN := range []int{1, 65534, 65535, 65536, 65537, 131072} { + tc[0] = firstN + outer: + for _, flush := range []bool{false, true} { + buf := new(bytes.Buffer) + want = want[:0] + + w, err := NewWriter(buf, BestSpeed) + if err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: NewWriter: %v", i, firstN, flush, err) + continue + } + for _, n := range tc { + want = append(want, abcabc[:n]...) 
+ if _, err := w.Write(abcabc[:n]); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Write: %v", i, firstN, flush, err) + continue outer + } + if !flush { + continue + } + if err := w.Flush(); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Flush: %v", i, firstN, flush, err) + continue outer + } + } + if err := w.Close(); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Close: %v", i, firstN, flush, err) + continue + } + + r := NewReader(buf) + got, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err) + continue + } + r.Close() + + if !bytes.Equal(got, want) { + t.Errorf("i=%d, firstN=%d, flush=%t: corruption during deflate-then-inflate", i, firstN, flush) + continue + } + } + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 00000000000..71c75a065ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// * Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// * Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. 
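The dictDecoder above is what makes preset dictionaries work on the decompression side: init preloads the sliding window with the dictionary so back-references can reach into it before any compressed data has been emitted. A small usage sketch of the matching public API (mirroring TestReaderDict earlier in this diff; illustrative only):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/klauspost/compress/flate"
)

func main() {
	dict := []byte("hello world")

	var buf bytes.Buffer
	w, err := flate.NewWriterDict(&buf, flate.BestCompression, dict)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello again world"))
	w.Close()

	// The reader must be primed with the same dictionary, which is what
	// dictDecoder.init does with the preset dict.
	r := flate.NewReaderDict(&buf, dict)
	out, _ := ioutil.ReadAll(r)
	r.Close()
	fmt.Println(string(out)) // hello again world
}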
+func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. 
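writeCopy above performs a forward copy in which each pass's source ends exactly where its destination begins, so later passes can reuse bytes written by earlier ones; that is how a (dist, length) pair with length greater than dist expands into a repeated run. A plain-slice illustration of the same loop (not the vendored method):

package main

import "fmt"

func main() {
	hist := []byte("abc")
	hist = append(hist, make([]byte, 9)...) // room for the copy

	dist, length := 3, 9
	dstPos := 3               // write position
	srcPos := dstPos - dist   // 0
	endPos := dstPos + length // 12

	// Each copy call doubles the region available to the next one.
	for dstPos < endPos {
		dstPos += copy(hist[dstPos:endPos], hist[srcPos:dstPos])
	}
	fmt.Println(string(hist)) // abcabcabcabc
}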
+loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. +func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go new file mode 100644 index 00000000000..9275cff7911 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go @@ -0,0 +1,139 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "strings" + "testing" +) + +func TestDictDecoder(t *testing.T) { + const ( + abc = "ABC\n" + fox = "The quick brown fox jumped over the lazy dog!\n" + poem = "The Road Not Taken\nRobert Frost\n" + + "\n" + + "Two roads diverged in a yellow wood,\n" + + "And sorry I could not travel both\n" + + "And be one traveler, long I stood\n" + + "And looked down one as far as I could\n" + + "To where it bent in the undergrowth;\n" + + "\n" + + "Then took the other, as just as fair,\n" + + "And having perhaps the better claim,\n" + + "Because it was grassy and wanted wear;\n" + + "Though as for that the passing there\n" + + "Had worn them really about the same,\n" + + "\n" + + "And both that morning equally lay\n" + + "In leaves no step had trodden black.\n" + + "Oh, I kept the first for another day!\n" + + "Yet knowing how way leads on to way,\n" + + "I doubted if I should ever come back.\n" + + "\n" + + "I shall be telling this with a sigh\n" + + "Somewhere ages and ages hence:\n" + + "Two roads diverged in a wood, and I-\n" + + "I took the one less traveled by,\n" + + "And that has made all the difference.\n" + ) + + var poemRefs = []struct { + dist int // Backward distance (0 if this is an insertion) + length int // Length of copy or insertion + }{ + {0, 38}, {33, 3}, {0, 48}, {79, 3}, {0, 11}, {34, 5}, {0, 6}, {23, 7}, + {0, 8}, {50, 3}, {0, 2}, {69, 3}, {34, 5}, {0, 4}, {97, 3}, {0, 4}, + {43, 5}, {0, 6}, {7, 4}, {88, 7}, {0, 12}, {80, 3}, {0, 2}, {141, 4}, + {0, 1}, {196, 3}, {0, 3}, {157, 3}, {0, 6}, {181, 3}, {0, 2}, {23, 3}, + {77, 3}, {28, 5}, {128, 3}, {110, 4}, {70, 3}, {0, 4}, {85, 6}, {0, 2}, + {182, 6}, {0, 4}, {133, 3}, {0, 7}, {47, 5}, {0, 20}, {112, 5}, {0, 1}, + {58, 3}, {0, 8}, {59, 3}, {0, 4}, {173, 3}, {0, 5}, {114, 3}, {0, 4}, + {92, 5}, {0, 2}, {71, 3}, {0, 2}, {76, 5}, {0, 1}, {46, 3}, {96, 4}, + {130, 4}, {0, 3}, {360, 3}, {0, 3}, {178, 5}, {0, 7}, {75, 3}, {0, 3}, + {45, 6}, {0, 6}, {299, 6}, {180, 3}, {70, 6}, {0, 1}, {48, 3}, {66, 4}, + {0, 3}, {47, 5}, {0, 9}, {325, 3}, {0, 1}, {359, 3}, {318, 3}, {0, 2}, + {199, 3}, {0, 1}, {344, 3}, {0, 3}, {248, 3}, {0, 10}, {310, 3}, {0, 3}, + {93, 6}, {0, 3}, {252, 3}, {157, 4}, {0, 2}, {273, 5}, {0, 14}, {99, 4}, + {0, 1}, {464, 4}, {0, 2}, {92, 4}, {495, 3}, {0, 1}, {322, 4}, {16, 4}, + {0, 3}, {402, 3}, {0, 2}, {237, 4}, {0, 2}, {432, 4}, {0, 1}, {483, 5}, + {0, 2}, {294, 4}, {0, 2}, {306, 3}, {113, 5}, {0, 1}, {26, 4}, 
{164, 3}, + {488, 4}, {0, 1}, {542, 3}, {248, 6}, {0, 5}, {205, 3}, {0, 8}, {48, 3}, + {449, 6}, {0, 2}, {192, 3}, {328, 4}, {9, 5}, {433, 3}, {0, 3}, {622, 25}, + {615, 5}, {46, 5}, {0, 2}, {104, 3}, {475, 10}, {549, 3}, {0, 4}, {597, 8}, + {314, 3}, {0, 1}, {473, 6}, {317, 5}, {0, 1}, {400, 3}, {0, 3}, {109, 3}, + {151, 3}, {48, 4}, {0, 4}, {125, 3}, {108, 3}, {0, 2}, + } + + var got, want bytes.Buffer + var dd dictDecoder + dd.init(1<<11, nil) + + var writeCopy = func(dist, length int) { + for length > 0 { + cnt := dd.tryWriteCopy(dist, length) + if cnt == 0 { + cnt = dd.writeCopy(dist, length) + } + + length -= cnt + if dd.availWrite() == 0 { + got.Write(dd.readFlush()) + } + } + } + var writeString = func(str string) { + for len(str) > 0 { + cnt := copy(dd.writeSlice(), str) + str = str[cnt:] + dd.writeMark(cnt) + if dd.availWrite() == 0 { + got.Write(dd.readFlush()) + } + } + } + + writeString(".") + want.WriteByte('.') + + str := poem + for _, ref := range poemRefs { + if ref.dist == 0 { + writeString(str[:ref.length]) + } else { + writeCopy(ref.dist, ref.length) + } + str = str[ref.length:] + } + want.WriteString(poem) + + writeCopy(dd.histSize(), 33) + want.Write(want.Bytes()[:33]) + + writeString(abc) + writeCopy(len(abc), 59*len(abc)) + want.WriteString(strings.Repeat(abc, 60)) + + writeString(fox) + writeCopy(len(fox), 9*len(fox)) + want.WriteString(strings.Repeat(fox, 10)) + + writeString(".") + writeCopy(1, 9) + want.WriteString(strings.Repeat(".", 10)) + + writeString(strings.ToUpper(poem)) + writeCopy(len(poem), 7*len(poem)) + want.WriteString(strings.Repeat(strings.ToUpper(poem), 8)) + + writeCopy(dd.histSize(), 10) + want.Write(want.Bytes()[want.Len()-dd.histSize():][:10]) + + got.Write(dd.readFlush()) + if got.String() != want.String() { + t.Errorf("final string mismatch:\ngot %q\nwant %q", got.String(), want.String()) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/flate_test.go b/vendor/github.com/klauspost/compress/flate/flate_test.go new file mode 100644 index 00000000000..3f67025cd76 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/flate_test.go @@ -0,0 +1,260 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test tests some internals of the flate package. +// The tests in package compress/gzip serve as the +// end-to-end test of the decompressor. + +package flate + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "testing" +) + +// The following test should not panic. +func TestIssue5915(t *testing.T) { + bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, 5, 5, 6, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 6, 0, 11, 0, 8, 0, 6, 6, 10, 8} + var h huffmanDecoder + if h.init(bits) { + t.Fatalf("Given sequence of bits is bad, and should not succeed.") + } +} + +// The following test should not panic. +func TestIssue5962(t *testing.T) { + bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, + 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11} + var h huffmanDecoder + if h.init(bits) { + t.Fatalf("Given sequence of bits is bad, and should not succeed.") + } +} + +// The following test should not panic. 
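The rejected bit-length sequences in these init tests, like the over- and under-subscribed sets in TestInvalidBits further down, all fail huffmanDecoder.init's completeness check, which is essentially a Kraft-sum test (with an exception for the degenerate single-code case). A minimal sketch, illustrative only and not part of the vendored tests (the kraft helper is invented for the example), that classifies a bit-length set the same way:

package main

import "fmt"

// kraft returns the Kraft sum of a set of code lengths, scaled by 1<<16 so the
// arithmetic stays integral: a complete prefix code sums to exactly 1<<16, an
// over-subscribed one to more, an incomplete one to less.
func kraft(bits []int) int {
	sum := 0
	for _, n := range bits {
		if n > 0 {
			sum += 1 << uint(16-n)
		}
	}
	return sum
}

func main() {
	fmt.Println(kraft([]int{1, 2, 3, 4, 4, 5}) > 1<<16) // true: over-subscribed (rejected)
	fmt.Println(kraft([]int{1, 2, 4, 4}) < 1<<16)       // true: incomplete (rejected)
	fmt.Println(kraft([]int{1, 2, 3, 3}) == 1<<16)      // true: complete (accepted)
}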
+func TestIssue6255(t *testing.T) { + bits1 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11} + bits2 := []int{11, 13} + var h huffmanDecoder + if !h.init(bits1) { + t.Fatalf("Given sequence of bits is good and should succeed.") + } + if h.init(bits2) { + t.Fatalf("Given sequence of bits is bad and should not succeed.") + } +} + +func TestInvalidEncoding(t *testing.T) { + // Initialize Huffman decoder to recognize "0". + var h huffmanDecoder + if !h.init([]int{1}) { + t.Fatal("Failed to initialize Huffman decoder") + } + + // Initialize decompressor with invalid Huffman coding. + var f decompressor + f.r = bytes.NewReader([]byte{0xff}) + + _, err := f.huffSym(&h) + if err == nil { + t.Fatal("Should have rejected invalid bit sequence") + } +} + +func TestInvalidBits(t *testing.T) { + oversubscribed := []int{1, 2, 3, 4, 4, 5} + incomplete := []int{1, 2, 4, 4} + var h huffmanDecoder + if h.init(oversubscribed) { + t.Fatal("Should reject oversubscribed bit-length set") + } + if h.init(incomplete) { + t.Fatal("Should reject incomplete bit-length set") + } +} + +func TestStreams(t *testing.T) { + // To verify any of these hexstrings as valid or invalid flate streams + // according to the C zlib library, you can use the Python wrapper library: + // >>> hex_string = "010100feff11" + // >>> import zlib + // >>> zlib.decompress(hex_string.decode("hex"), -15) # Negative means raw DEFLATE + // '\x11' + + testCases := []struct { + desc string // Description of the stream + stream string // Hexstring of the input DEFLATE stream + want string // Expected result. Use "fail" to expect failure + }{{ + "degenerate HCLenTree", + "05e0010000000000100000000000000000000000000000000000000000000000" + + "00000000000000000004", + "fail", + }, { + "complete HCLenTree, empty HLitTree, empty HDistTree", + "05e0010400000000000000000000000000000000000000000000000000000000" + + "00000000000000000010", + "fail", + }, { + "empty HCLenTree", + "05e0010000000000000000000000000000000000000000000000000000000000" + + "00000000000000000010", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, use missing HDist symbol", + "000100feff000de0010400000000100000000000000000000000000000000000" + + "0000000000000000000000000000002c", + "fail", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use missing HDist symbol", + "000100feff000de0010000000000000000000000000000000000000000000000" + + "00000000000000000610000000004070", + "fail", + }, { + "complete HCLenTree, empty HLitTree, empty HDistTree", + "05e0010400000000100400000000000000000000000000000000000000000000" + + "0000000000000000000000000008", + "fail", + }, { + "complete HCLenTree, empty HLitTree, degenerate HDistTree", + "05e0010400000000100400000000000000000000000000000000000000000000" + + "0000000000000000000800000008", + "fail", + }, { + "complete HCLenTree, degenerate HLitTree, degenerate HDistTree, use missing HLit symbol", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000001c", + "fail", + }, { + "complete HCLenTree, complete HLitTree, too large HDistTree", + "edff870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000080000000000000004", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, excessive repeater code", + "edfd870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000e8b100", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree of normal length 30", + 
"05fd01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07000000fe01", + "", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree of excessive length 31", + "05fe01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07000000fc03", + "fail", + }, { + "complete HCLenTree, over-subscribed HLitTree, empty HDistTree", + "05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07f00f", + "fail", + }, { + "complete HCLenTree, under-subscribed HLitTree, empty HDistTree", + "05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" + + "fffffffffcffffffff07f00f", + "fail", + }, { + "complete HCLenTree, complete HLitTree with single code, empty HDistTree", + "05e001240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07f00f", + "01", + }, { + "complete HCLenTree, complete HLitTree with multiple codes, empty HDistTree", + "05e301240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07807f", + "01", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HDist symbol", + "000100feff000de0010400000000100000000000000000000000000000000000" + + "0000000000000000000000000000003c", + "00000000", + }, { + "complete HCLenTree, degenerate HLitTree, degenerate HDistTree", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000000c", + "", + }, { + "complete HCLenTree, degenerate HLitTree, empty HDistTree", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "00000000000000000004", + "", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, spanning repeater code", + "edfd870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000e8b000", + "", + }, { + "complete HCLenTree with length codes, complete HLitTree, empty HDistTree", + "ede0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000000400004000", + "", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit symbol 284 with count 31", + "000100feff00ede0010400000000100000000000000000000000000000000000" + + "000000000000000000000000000000040000407f00", + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "000000", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit and HDist symbols", + "0cc2010d00000082b0ac4aff0eb07d27060000ffff", + "616263616263", + }, { + "fixed block, use reserved symbol 287", + "33180700", + "fail", + }, { + "raw block", + "010100feff11", + "11", + }, { + "issue 10426 - over-subscribed HCLenTree causes a hang", + "344c4a4e494d4b070000ff2e2eff2e2e2e2e2eff", + "fail", + }, { + "issue 11030 - empty HDistTree unexpectedly leads to error", + "05c0070600000080400fff37a0ca", + "", + }, { + "issue 11033 - empty HDistTree unexpectedly leads to error", + "050fb109c020cca5d017dcbca044881ee1034ec149c8980bbc413c2ab35be9dc" + + 
"b1473449922449922411202306ee97b0383a521b4ffdcf3217f9f7d3adb701", + "3130303634342068652e706870005d05355f7ed957ff084a90925d19e3ebc6d0" + + "c6d7", + }} + + for i, tc := range testCases { + data, err := hex.DecodeString(tc.stream) + if err != nil { + t.Fatal(err) + } + data, err = ioutil.ReadAll(NewReader(bytes.NewReader(data))) + if tc.want == "fail" { + if err == nil { + t.Errorf("#%d (%s): got nil error, want non-nil", i, tc.desc) + } + } else { + if err != nil { + t.Errorf("#%d (%s): %v", i, tc.desc, err) + continue + } + if got := hex.EncodeToString(data); got != tc.want { + t.Errorf("#%d (%s):\ngot %q\nwant %q", i, tc.desc, got, tc.want) + } + + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go new file mode 100644 index 00000000000..154c89a488e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/gen.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates fixedhuff.go +// Invoke as +// +// go run gen.go -output fixedhuff.go + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" +) + +var filename = flag.String("output", "fixedhuff.go", "output file name") + +const maxCodeLen = 16 + +// Note: the definition of the huffmanDecoder struct is copied from +// inflate.go, as it is private to the implementation. + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). 
+	if max == 0 {
+		return true
+	}
+
+	code := 0
+	var nextcode [maxCodeLen]int
+	for i := min; i <= max; i++ {
+		code <<= 1
+		nextcode[i] = code
+		code += count[i]
+	}
+
+	// Check that the coding is complete (i.e., that we've
+	// assigned all 2-to-the-max possible bit sequences).
+	// Exception: To be compatible with zlib, we also need to
+	// accept degenerate single-code codings. See also
+	// TestDegenerateHuffmanCoding.
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		return false
+	}
+
+	h.min = min
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		h.links = make([][]uint32, huffmanNumChunks-link)
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint32, numLinks)
+		}
+	}
+
+	for i, n := range bits {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint32(i<<huffmanValueShift | n)
+		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+func main() {
+	flag.Parse()
+
+	var h huffmanDecoder
+	var bits [288]int
+	initReverseByte()
+	for i := 0; i < 144; i++ {
+		bits[i] = 8
+	}
+	for i := 144; i < 256; i++ {
+		bits[i] = 9
+	}
+	for i := 256; i < 280; i++ {
+		bits[i] = 7
+	}
+	for i := 280; i < 288; i++ {
+		bits[i] = 8
+	}
+	h.init(bits[:])
+	if h.links != nil {
+		log.Fatal("Unexpected links table in fixed Huffman decoder")
+	}
+
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.`+"\n\n") + + fmt.Fprintln(&buf, "package flate") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") + fmt.Fprintf(&buf, "\t%d,\n", h.min) + fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") + for i := 0; i < huffmanNumChunks; i++ { + if i&7 == 0 { + fmt.Fprintf(&buf, "\t\t") + } else { + fmt.Fprintf(&buf, " ") + } + fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) + if i&7 == 7 { + fmt.Fprintln(&buf) + } + } + fmt.Fprintln(&buf, "\t},") + fmt.Fprintln(&buf, "\tnil, 0,") + fmt.Fprintln(&buf, "}") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } +} + +var reverseByte [256]byte + +func initReverseByte() { + for x := 0; x < 256; x++ { + var result byte + for i := uint(0); i < 8; i++ { + result |= byte(((x >> i) & 1) << (7 - i)) + } + reverseByte[x] = result + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 00000000000..f9b2a699a3d --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,701 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "io" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 240 + + // bufferSize is the actual output byte buffer size. + // It must have additional headroom for a flush + // which can contain up to 8 bytes. + bufferSize = bufferFlushSize + 8 +) + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = []int8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = []uint32{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// offset code word extra bits. 
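The length tables above follow DEFLATE's base-plus-extra-bits scheme: a match length is written as the Huffman symbol lengthCodesStart+code followed by length-lengthBase[code] carried in lengthExtraBits[code] raw bits (see writeTokens further down). A small sketch of the reader's side of that arithmetic, illustrative only and not part of the vendored file, using abbreviated copies of the tables above:

package main

import "fmt"

// Abbreviated copies of the length tables defined above.
var lengthBase = []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28}
var lengthExtraBits = []uint{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2}

func main() {
	// Code 8 (symbol 257+8 = 265) has base 8 and one extra bit, so the extra
	// bit selects between token lengths 8 and 9.
	code, extra := 8, uint32(1)
	fmt.Printf("symbol %d, %d extra bit(s), length %d\n",
		257+code, lengthExtraBits[code], lengthBase[code]+extra)
}

The offset tables that follow use the same pattern, with up to 13 extra bits per offset code in the normal DEFLATE window.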
+var offsetExtraBits = []int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, +} + +var offsetBase = []uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, + 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, + 0x100000, 0x180000, 0x200000, 0x300000, +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint + bytes [bufferSize]byte + codegenFreq [codegenCodeCount]int32 + nbytes int + literalFreq []int32 + offsetFreq []int32 + codegen []uint8 + literalEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error +} + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalFreq: make([]int32, maxNumLit), + offsetFreq: make([]int32, offsetCodeCount), + codegen: make([]uint8, maxNumLit+offsetCodeCount+1), + literalEncoding: newHuffmanEncoder(maxNumLit), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.bytes = [bufferSize]byte{} +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint) { + if w.err != nil { + return + } + w.bits |= uint64(b) << w.nbits + w.nbits += nb + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). 
This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = uint8(litEnc.codes[i].len) + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = uint8(offEnc.codes[i].len) + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + header := 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7 + size = header + + litEnc.bitLength(w.literalFreq) + + offEnc.bitLength(w.offsetFreq) + + extraBits + + return size, numCodegens +} + +// fixedSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq) + + fixedOffsetEncoding.bitLength(w.offsetFreq) + + extraBits +} + +// storedSize calculates the stored size, including header. 
+// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + if w.err != nil { + return + } + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord int = int(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + break + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + break + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + break + } + } +} + +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + // We only bother calculating the costs of the extra bits required by + // the length of offset fields (which will be the same for both fixed + // and dynamic encoding), if we need to compare those two encodings + // against stored encoding. + for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { + // First eight length codes have extra size = 0. 
+ extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) + } + for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { + // First four offset codes have extra size = 0. + extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode]) + } + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = w.fixedSize(extraBits) + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize < size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) + + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + + // Write the tokens. + w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
+func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + for i := range w.offsetFreq { + w.offsetFreq[i] = 0 + } + + for _, t := range tokens { + if t < matchType { + w.literalFreq[t.literal()]++ + continue + } + length := t.length() + offset := t.offset() + w.literalFreq[lengthCodesStart+lengthCode(length)]++ + w.offsetFreq[offsetCode(offset)]++ + } + + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + w.literalEncoding.generate(w.literalFreq, 15) + w.offsetEncoding.generate(w.offsetFreq, 15) + return +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + for _, t := range tokens { + if t < matchType { + w.writeCode(leCodes[t.literal()]) + continue + } + // Write the length + length := t.length() + lengthCode := lengthCode(length) + w.writeCode(leCodes[lengthCode+lengthCodesStart]) + extraLengthBits := uint(lengthExtraBits[lengthCode]) + if extraLengthBits > 0 { + extraLength := int32(length - lengthBase[lengthCode]) + w.writeBits(extraLength, extraLengthBits) + } + // Write the offset + offset := t.offset() + offsetCode := offsetCode(offset) + w.writeCode(oeCodes[offsetCode]) + extraOffsetBits := uint(offsetExtraBits[offsetCode]) + if extraOffsetBits > 0 { + extraOffset := int32(offset - offsetBase[offsetCode]) + w.writeBits(extraOffset, extraOffsetBits) + } + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq, 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + + // Add everything as literals + histogram(input, w.literalFreq) + + w.literalFreq[endBlockMarker] = 1 + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + w.literalEncoding.generate(w.literalFreq, 15) + + // Figure out smallest code. + // Always use dynamic Huffman or Store + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) + + // Store bytes, if we don't get a reasonable improvement. 
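A quick worked example of the stored-versus-Huffman decision used here and in writeBlockDynamic above: the block is stored whenever ssize < size + size>>4, i.e. the entropy-coded estimate must beat the stored size by at least one sixteenth of its own estimate. For a 100-byte input, storedSize is (100+5)*8 = 840 bits; a dynamic estimate of 800 bits loses (800 + 50 = 850 > 840, so the block is stored), while an estimate of 780 bits wins (780 + 48 = 828 <= 840, so the Huffman-coded block is written).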
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + encoding := w.literalEncoding.codes[:257] + n := w.nbytes + for _, t := range input { + // Bitwriting inlined, ~30% speedup + c := encoding[t] + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits < 48 { + continue + } + // Store 6 bytes + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n < bufferFlushSize { + continue + } + w.write(w.bytes[:n]) + if w.err != nil { + return // Return early in the event of write failures + } + n = 0 + } + w.nbytes = n + w.writeCode(encoding[endBlockMarker]) +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go new file mode 100644 index 00000000000..882d3abec18 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go @@ -0,0 +1,366 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +var update = flag.Bool("update", false, "update reference files") + +// TestBlockHuff tests huffman encoding against reference files +// to detect possible regressions. +// If encoding/bit allocation changes you can regenerate these files +// by using the -update flag. +func TestBlockHuff(t *testing.T) { + // determine input files + match, err := filepath.Glob("testdata/huffman-*.in") + if err != nil { + t.Fatal(err) + } + + for _, in := range match { + out := in // for files where input and output are identical + if strings.HasSuffix(in, ".in") { + out = in[:len(in)-len(".in")] + ".golden" + } + testBlockHuff(t, in, out) + } +} + +func testBlockHuff(t *testing.T, in, out string) { + all, err := ioutil.ReadFile(in) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + bw.writeBlockHuff(false, all) + bw.flush() + got := buf.Bytes() + + want, err := ioutil.ReadFile(out) + if err != nil && !*update { + t.Error(err) + return + } + + t.Logf("Testing %q", in) + if !bytes.Equal(got, want) { + if *update { + if in != out { + t.Logf("Updating %q", out) + if err := ioutil.WriteFile(out, got, 0666); err != nil { + t.Error(err) + } + return + } + // in == out: don't accidentally destroy input + t.Errorf("WARNING: -update did not rewrite input file %s", in) + } + + t.Errorf("%q != %q (see %q)", in, out, in+".got") + if err := ioutil.WriteFile(in+".got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Output ok") + + // Test if the writer produces the same output after reset. 
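As the TestBlockHuff comment above notes, the reference outputs can be regenerated when the bit allocation changes: running the package's tests with the -update flag declared at the top of this file (for example, something like go test -run TestBlockHuff -update from the flate directory) rewrites the .golden files when they differ instead of failing; the exact invocation may vary with how the Go toolchain forwards test-binary flags.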
+ buf.Reset() + bw.reset(&buf) + bw.writeBlockHuff(false, all) + bw.flush() + got = buf.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("after reset %q != %q (see %q)", in, out, in+".reset.got") + if err := ioutil.WriteFile(in+".reset.got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Reset ok") + testWriterEOF(t, "huff", huffTest{input: in}, true) +} + +type huffTest struct { + tokens []token + input string // File name of input data matching the tokens. + want string // File name of data with the expected output with input available. + wantNoInput string // File name of the expected output when no input is available. +} + +const ml = 0x7fc00000 // Maximum length token. Used to reduce the size of writeBlockTests + +var writeBlockTests = []huffTest{ + { + input: "testdata/huffman-null-max.in", + want: "testdata/huffman-null-max.%s.expect", + wantNoInput: "testdata/huffman-null-max.%s.expect-noinput", + tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x0, 0x0}, + }, + { + input: "testdata/huffman-pi.in", + want: "testdata/huffman-pi.%s.expect", + wantNoInput: "testdata/huffman-pi.%s.expect-noinput", + tokens: []token{0x33, 0x2e, 0x31, 0x34, 0x31, 0x35, 0x39, 0x32, 0x36, 0x35, 0x33, 0x35, 0x38, 0x39, 0x37, 0x39, 0x33, 0x32, 0x33, 0x38, 0x34, 0x36, 0x32, 0x36, 0x34, 0x33, 0x33, 0x38, 0x33, 0x32, 0x37, 0x39, 0x35, 0x30, 0x32, 0x38, 0x38, 0x34, 0x31, 0x39, 0x37, 0x31, 0x36, 0x39, 0x33, 0x39, 0x39, 0x33, 0x37, 0x35, 0x31, 0x30, 0x35, 0x38, 0x32, 0x30, 0x39, 0x37, 0x34, 0x39, 0x34, 0x34, 0x35, 0x39, 0x32, 0x33, 0x30, 0x37, 0x38, 0x31, 0x36, 0x34, 0x30, 0x36, 0x32, 0x38, 0x36, 0x32, 0x30, 0x38, 0x39, 0x39, 0x38, 0x36, 0x32, 0x38, 0x30, 0x33, 0x34, 0x38, 0x32, 0x35, 0x33, 0x34, 0x32, 0x31, 0x31, 0x37, 0x30, 0x36, 0x37, 0x39, 0x38, 0x32, 0x31, 0x34, 0x38, 0x30, 0x38, 0x36, 0x35, 0x31, 0x33, 0x32, 0x38, 0x32, 0x33, 0x30, 0x36, 0x36, 0x34, 0x37, 0x30, 0x39, 0x33, 0x38, 0x34, 0x34, 0x36, 0x30, 0x39, 0x35, 0x35, 0x30, 0x35, 0x38, 0x32, 0x32, 0x33, 0x31, 0x37, 0x32, 0x35, 0x33, 0x35, 0x39, 0x34, 0x30, 0x38, 0x31, 0x32, 0x38, 0x34, 0x38, 0x31, 0x31, 0x31, 0x37, 0x34, 0x4040007e, 0x34, 0x31, 0x30, 0x32, 0x37, 0x30, 0x31, 0x39, 0x33, 0x38, 0x35, 0x32, 0x31, 0x31, 0x30, 0x35, 0x35, 0x35, 0x39, 0x36, 0x34, 0x34, 0x36, 0x32, 0x32, 0x39, 0x34, 0x38, 0x39, 0x35, 0x34, 0x39, 0x33, 0x30, 0x33, 0x38, 0x31, 0x40400012, 0x32, 0x38, 0x38, 0x31, 0x30, 0x39, 0x37, 0x35, 0x36, 0x36, 0x35, 0x39, 0x33, 0x33, 0x34, 0x34, 0x36, 0x40400047, 0x37, 0x35, 0x36, 0x34, 0x38, 0x32, 0x33, 0x33, 0x37, 0x38, 0x36, 0x37, 0x38, 0x33, 0x31, 
0x36, 0x35, 0x32, 0x37, 0x31, 0x32, 0x30, 0x31, 0x39, 0x30, 0x39, 0x31, 0x34, 0x4040001a, 0x35, 0x36, 0x36, 0x39, 0x32, 0x33, 0x34, 0x36, 0x404000b2, 0x36, 0x31, 0x30, 0x34, 0x35, 0x34, 0x33, 0x32, 0x36, 0x40400032, 0x31, 0x33, 0x33, 0x39, 0x33, 0x36, 0x30, 0x37, 0x32, 0x36, 0x30, 0x32, 0x34, 0x39, 0x31, 0x34, 0x31, 0x32, 0x37, 0x33, 0x37, 0x32, 0x34, 0x35, 0x38, 0x37, 0x30, 0x30, 0x36, 0x36, 0x30, 0x36, 0x33, 0x31, 0x35, 0x35, 0x38, 0x38, 0x31, 0x37, 0x34, 0x38, 0x38, 0x31, 0x35, 0x32, 0x30, 0x39, 0x32, 0x30, 0x39, 0x36, 0x32, 0x38, 0x32, 0x39, 0x32, 0x35, 0x34, 0x30, 0x39, 0x31, 0x37, 0x31, 0x35, 0x33, 0x36, 0x34, 0x33, 0x36, 0x37, 0x38, 0x39, 0x32, 0x35, 0x39, 0x30, 0x33, 0x36, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x30, 0x35, 0x33, 0x30, 0x35, 0x34, 0x38, 0x38, 0x32, 0x30, 0x34, 0x36, 0x36, 0x35, 0x32, 0x31, 0x33, 0x38, 0x34, 0x31, 0x34, 0x36, 0x39, 0x35, 0x31, 0x39, 0x34, 0x31, 0x35, 0x31, 0x31, 0x36, 0x30, 0x39, 0x34, 0x33, 0x33, 0x30, 0x35, 0x37, 0x32, 0x37, 0x30, 0x33, 0x36, 0x35, 0x37, 0x35, 0x39, 0x35, 0x39, 0x31, 0x39, 0x35, 0x33, 0x30, 0x39, 0x32, 0x31, 0x38, 0x36, 0x31, 0x31, 0x37, 0x404000e9, 0x33, 0x32, 0x40400009, 0x39, 0x33, 0x31, 0x30, 0x35, 0x31, 0x31, 0x38, 0x35, 0x34, 0x38, 0x30, 0x37, 0x4040010e, 0x33, 0x37, 0x39, 0x39, 0x36, 0x32, 0x37, 0x34, 0x39, 0x35, 0x36, 0x37, 0x33, 0x35, 0x31, 0x38, 0x38, 0x35, 0x37, 0x35, 0x32, 0x37, 0x32, 0x34, 0x38, 0x39, 0x31, 0x32, 0x32, 0x37, 0x39, 0x33, 0x38, 0x31, 0x38, 0x33, 0x30, 0x31, 0x31, 0x39, 0x34, 0x39, 0x31, 0x32, 0x39, 0x38, 0x33, 0x33, 0x36, 0x37, 0x33, 0x33, 0x36, 0x32, 0x34, 0x34, 0x30, 0x36, 0x35, 0x36, 0x36, 0x34, 0x33, 0x30, 0x38, 0x36, 0x30, 0x32, 0x31, 0x33, 0x39, 0x34, 0x39, 0x34, 0x36, 0x33, 0x39, 0x35, 0x32, 0x32, 0x34, 0x37, 0x33, 0x37, 0x31, 0x39, 0x30, 0x37, 0x30, 0x32, 0x31, 0x37, 0x39, 0x38, 0x40800099, 0x37, 0x30, 0x32, 0x37, 0x37, 0x30, 0x35, 0x33, 0x39, 0x32, 0x31, 0x37, 0x31, 0x37, 0x36, 0x32, 0x39, 0x33, 0x31, 0x37, 0x36, 0x37, 0x35, 0x40800232, 0x37, 0x34, 0x38, 0x31, 0x40400006, 0x36, 0x36, 0x39, 0x34, 0x30, 0x404001e7, 0x30, 0x30, 0x30, 0x35, 0x36, 0x38, 0x31, 0x32, 0x37, 0x31, 0x34, 0x35, 0x32, 0x36, 0x33, 0x35, 0x36, 0x30, 0x38, 0x32, 0x37, 0x37, 0x38, 0x35, 0x37, 0x37, 0x31, 0x33, 0x34, 0x32, 0x37, 0x35, 0x37, 0x37, 0x38, 0x39, 0x36, 0x40400129, 0x33, 0x36, 0x33, 0x37, 0x31, 0x37, 0x38, 0x37, 0x32, 0x31, 0x34, 0x36, 0x38, 0x34, 0x34, 0x30, 0x39, 0x30, 0x31, 0x32, 0x32, 0x34, 0x39, 0x35, 0x33, 0x34, 0x33, 0x30, 0x31, 0x34, 0x36, 0x35, 0x34, 0x39, 0x35, 0x38, 0x35, 0x33, 0x37, 0x31, 0x30, 0x35, 0x30, 0x37, 0x39, 0x404000ca, 0x36, 0x40400153, 0x38, 0x39, 0x32, 0x33, 0x35, 0x34, 0x404001c9, 0x39, 0x35, 0x36, 0x31, 0x31, 0x32, 0x31, 0x32, 0x39, 0x30, 0x32, 0x31, 0x39, 0x36, 0x30, 0x38, 0x36, 0x34, 0x30, 0x33, 0x34, 0x34, 0x31, 0x38, 0x31, 0x35, 0x39, 0x38, 0x31, 0x33, 0x36, 0x32, 0x39, 0x37, 0x37, 0x34, 0x40400074, 0x30, 0x39, 0x39, 0x36, 0x30, 0x35, 0x31, 0x38, 0x37, 0x30, 0x37, 0x32, 0x31, 0x31, 0x33, 0x34, 0x39, 0x40800000, 0x38, 0x33, 0x37, 0x32, 0x39, 0x37, 0x38, 0x30, 0x34, 0x39, 0x39, 0x404002da, 0x39, 0x37, 0x33, 0x31, 0x37, 0x33, 0x32, 0x38, 0x4040018a, 0x36, 0x33, 0x31, 0x38, 0x35, 0x40400301, 0x404002e8, 0x34, 0x35, 0x35, 0x33, 0x34, 0x36, 0x39, 0x30, 0x38, 0x33, 0x30, 0x32, 0x36, 0x34, 0x32, 0x35, 0x32, 0x32, 0x33, 0x30, 0x404002e3, 0x40400267, 0x38, 0x35, 0x30, 0x33, 0x35, 0x32, 0x36, 0x31, 0x39, 0x33, 0x31, 0x31, 0x40400212, 0x31, 0x30, 0x31, 0x30, 0x30, 0x30, 0x33, 0x31, 0x33, 0x37, 0x38, 0x33, 0x38, 0x37, 0x35, 0x32, 0x38, 0x38, 0x36, 0x35, 0x38, 0x37, 0x35, 0x33, 0x33, 0x32, 0x30, 0x38, 
0x33, 0x38, 0x31, 0x34, 0x32, 0x30, 0x36, 0x40400140, 0x4040012b, 0x31, 0x34, 0x37, 0x33, 0x30, 0x33, 0x35, 0x39, 0x4080032e, 0x39, 0x30, 0x34, 0x32, 0x38, 0x37, 0x35, 0x35, 0x34, 0x36, 0x38, 0x37, 0x33, 0x31, 0x31, 0x35, 0x39, 0x35, 0x40400355, 0x33, 0x38, 0x38, 0x32, 0x33, 0x35, 0x33, 0x37, 0x38, 0x37, 0x35, 0x4080037f, 0x39, 0x4040013a, 0x31, 0x40400148, 0x38, 0x30, 0x35, 0x33, 0x4040018a, 0x32, 0x32, 0x36, 0x38, 0x30, 0x36, 0x36, 0x31, 0x33, 0x30, 0x30, 0x31, 0x39, 0x32, 0x37, 0x38, 0x37, 0x36, 0x36, 0x31, 0x31, 0x31, 0x39, 0x35, 0x39, 0x40400237, 0x36, 0x40800124, 0x38, 0x39, 0x33, 0x38, 0x30, 0x39, 0x35, 0x32, 0x35, 0x37, 0x32, 0x30, 0x31, 0x30, 0x36, 0x35, 0x34, 0x38, 0x35, 0x38, 0x36, 0x33, 0x32, 0x37, 0x4040009a, 0x39, 0x33, 0x36, 0x31, 0x35, 0x33, 0x40400220, 0x4080015c, 0x32, 0x33, 0x30, 0x33, 0x30, 0x31, 0x39, 0x35, 0x32, 0x30, 0x33, 0x35, 0x33, 0x30, 0x31, 0x38, 0x35, 0x32, 0x40400171, 0x40400075, 0x33, 0x36, 0x32, 0x32, 0x35, 0x39, 0x39, 0x34, 0x31, 0x33, 0x40400254, 0x34, 0x39, 0x37, 0x32, 0x31, 0x37, 0x404000de, 0x33, 0x34, 0x37, 0x39, 0x31, 0x33, 0x31, 0x35, 0x31, 0x35, 0x35, 0x37, 0x34, 0x38, 0x35, 0x37, 0x32, 0x34, 0x32, 0x34, 0x35, 0x34, 0x31, 0x35, 0x30, 0x36, 0x39, 0x4040013f, 0x38, 0x32, 0x39, 0x35, 0x33, 0x33, 0x31, 0x31, 0x36, 0x38, 0x36, 0x31, 0x37, 0x32, 0x37, 0x38, 0x40400337, 0x39, 0x30, 0x37, 0x35, 0x30, 0x39, 0x4040010d, 0x37, 0x35, 0x34, 0x36, 0x33, 0x37, 0x34, 0x36, 0x34, 0x39, 0x33, 0x39, 0x33, 0x31, 0x39, 0x32, 0x35, 0x35, 0x30, 0x36, 0x30, 0x34, 0x30, 0x30, 0x39, 0x4040026b, 0x31, 0x36, 0x37, 0x31, 0x31, 0x33, 0x39, 0x30, 0x30, 0x39, 0x38, 0x40400335, 0x34, 0x30, 0x31, 0x32, 0x38, 0x35, 0x38, 0x33, 0x36, 0x31, 0x36, 0x30, 0x33, 0x35, 0x36, 0x33, 0x37, 0x30, 0x37, 0x36, 0x36, 0x30, 0x31, 0x30, 0x34, 0x40400172, 0x38, 0x31, 0x39, 0x34, 0x32, 0x39, 0x4080041e, 0x404000ef, 0x4040028b, 0x37, 0x38, 0x33, 0x37, 0x34, 0x404004a8, 0x38, 0x32, 0x35, 0x35, 0x33, 0x37, 0x40800209, 0x32, 0x36, 0x38, 0x4040002e, 0x34, 0x30, 0x34, 0x37, 0x404001d1, 0x34, 0x404004b5, 0x4040038d, 0x38, 0x34, 0x404003a8, 0x36, 0x40c0031f, 0x33, 0x33, 0x31, 0x33, 0x36, 0x37, 0x37, 0x30, 0x32, 0x38, 0x39, 0x38, 0x39, 0x31, 0x35, 0x32, 0x40400062, 0x35, 0x32, 0x31, 0x36, 0x32, 0x30, 0x35, 0x36, 0x39, 0x36, 0x40400411, 0x30, 0x35, 0x38, 0x40400477, 0x35, 0x40400498, 0x35, 0x31, 0x31, 0x40400209, 0x38, 0x32, 0x34, 0x33, 0x30, 0x30, 0x33, 0x35, 0x35, 0x38, 0x37, 0x36, 0x34, 0x30, 0x32, 0x34, 0x37, 0x34, 0x39, 0x36, 0x34, 0x37, 0x33, 0x32, 0x36, 0x33, 0x4040043e, 0x39, 0x39, 0x32, 0x4040044b, 0x34, 0x32, 0x36, 0x39, 0x40c002c5, 0x37, 0x404001d6, 0x34, 0x4040053d, 0x4040041d, 0x39, 0x33, 0x34, 0x31, 0x37, 0x404001ad, 0x31, 0x32, 0x4040002a, 0x34, 0x4040019e, 0x31, 0x35, 0x30, 0x33, 0x30, 0x32, 0x38, 0x36, 0x31, 0x38, 0x32, 0x39, 0x37, 0x34, 0x35, 0x35, 0x35, 0x37, 0x30, 0x36, 0x37, 0x34, 0x40400135, 0x35, 0x30, 0x35, 0x34, 0x39, 0x34, 0x35, 0x38, 0x404001c5, 0x39, 0x40400051, 0x35, 0x36, 0x404001ec, 0x37, 0x32, 0x31, 0x30, 0x37, 0x39, 0x40400159, 0x33, 0x30, 0x4040010a, 0x33, 0x32, 0x31, 0x31, 0x36, 0x35, 0x33, 0x34, 0x34, 0x39, 0x38, 0x37, 0x32, 0x30, 0x32, 0x37, 0x4040011b, 0x30, 0x32, 0x33, 0x36, 0x34, 0x4040022e, 0x35, 0x34, 0x39, 0x39, 0x31, 0x31, 0x39, 0x38, 0x40400418, 0x34, 0x4040011b, 0x35, 0x33, 0x35, 0x36, 0x36, 0x33, 0x36, 0x39, 0x40400450, 0x32, 0x36, 0x35, 0x404002e4, 0x37, 0x38, 0x36, 0x32, 0x35, 0x35, 0x31, 0x404003da, 0x31, 0x37, 0x35, 0x37, 0x34, 0x36, 0x37, 0x32, 0x38, 0x39, 0x30, 0x39, 0x37, 0x37, 0x37, 0x37, 0x40800453, 0x30, 0x30, 0x30, 0x404005fd, 0x37, 0x30, 0x404004df, 
0x36, 0x404003e9, 0x34, 0x39, 0x31, 0x4040041e, 0x40400297, 0x32, 0x31, 0x34, 0x37, 0x37, 0x32, 0x33, 0x35, 0x30, 0x31, 0x34, 0x31, 0x34, 0x40400643, 0x33, 0x35, 0x36, 0x404004af, 0x31, 0x36, 0x31, 0x33, 0x36, 0x31, 0x31, 0x35, 0x37, 0x33, 0x35, 0x32, 0x35, 0x40400504, 0x33, 0x34, 0x4040005b, 0x31, 0x38, 0x4040047b, 0x38, 0x34, 0x404005e7, 0x33, 0x33, 0x32, 0x33, 0x39, 0x30, 0x37, 0x33, 0x39, 0x34, 0x31, 0x34, 0x33, 0x33, 0x33, 0x34, 0x35, 0x34, 0x37, 0x37, 0x36, 0x32, 0x34, 0x40400242, 0x32, 0x35, 0x31, 0x38, 0x39, 0x38, 0x33, 0x35, 0x36, 0x39, 0x34, 0x38, 0x35, 0x35, 0x36, 0x32, 0x30, 0x39, 0x39, 0x32, 0x31, 0x39, 0x32, 0x32, 0x32, 0x31, 0x38, 0x34, 0x32, 0x37, 0x4040023e, 0x32, 0x404000ba, 0x36, 0x38, 0x38, 0x37, 0x36, 0x37, 0x31, 0x37, 0x39, 0x30, 0x40400055, 0x30, 0x40800106, 0x36, 0x36, 0x404003e7, 0x38, 0x38, 0x36, 0x32, 0x37, 0x32, 0x404006dc, 0x31, 0x37, 0x38, 0x36, 0x30, 0x38, 0x35, 0x37, 0x40400073, 0x33, 0x408002fc, 0x37, 0x39, 0x37, 0x36, 0x36, 0x38, 0x31, 0x404002bd, 0x30, 0x30, 0x39, 0x35, 0x33, 0x38, 0x38, 0x40400638, 0x33, 0x404006a5, 0x30, 0x36, 0x38, 0x30, 0x30, 0x36, 0x34, 0x32, 0x32, 0x35, 0x31, 0x32, 0x35, 0x32, 0x4040057b, 0x37, 0x33, 0x39, 0x32, 0x40400297, 0x40400474, 0x34, 0x408006b3, 0x38, 0x36, 0x32, 0x36, 0x39, 0x34, 0x35, 0x404001e5, 0x34, 0x31, 0x39, 0x36, 0x35, 0x32, 0x38, 0x35, 0x30, 0x40400099, 0x4040039c, 0x31, 0x38, 0x36, 0x33, 0x404001be, 0x34, 0x40800154, 0x32, 0x30, 0x33, 0x39, 0x4040058b, 0x34, 0x35, 0x404002bc, 0x32, 0x33, 0x37, 0x4040042c, 0x36, 0x40400510, 0x35, 0x36, 0x40400638, 0x37, 0x31, 0x39, 0x31, 0x37, 0x32, 0x38, 0x40400171, 0x37, 0x36, 0x34, 0x36, 0x35, 0x37, 0x35, 0x37, 0x33, 0x39, 0x40400101, 0x33, 0x38, 0x39, 0x40400748, 0x38, 0x33, 0x32, 0x36, 0x34, 0x35, 0x39, 0x39, 0x35, 0x38, 0x404006a7, 0x30, 0x34, 0x37, 0x38, 0x404001de, 0x40400328, 0x39, 0x4040002d, 0x36, 0x34, 0x30, 0x37, 0x38, 0x39, 0x35, 0x31, 0x4040008e, 0x36, 0x38, 0x33, 0x4040012f, 0x32, 0x35, 0x39, 0x35, 0x37, 0x30, 0x40400468, 0x38, 0x32, 0x32, 0x404002c8, 0x32, 0x4040061b, 0x34, 0x30, 0x37, 0x37, 0x32, 0x36, 0x37, 0x31, 0x39, 0x34, 0x37, 0x38, 0x40400319, 0x38, 0x32, 0x36, 0x30, 0x31, 0x34, 0x37, 0x36, 0x39, 0x39, 0x30, 0x39, 0x404004e8, 0x30, 0x31, 0x33, 0x36, 0x33, 0x39, 0x34, 0x34, 0x33, 0x4040027f, 0x33, 0x30, 0x40400105, 0x32, 0x30, 0x33, 0x34, 0x39, 0x36, 0x32, 0x35, 0x32, 0x34, 0x35, 0x31, 0x37, 0x404003b5, 0x39, 0x36, 0x35, 0x31, 0x34, 0x33, 0x31, 0x34, 0x32, 0x39, 0x38, 0x30, 0x39, 0x31, 0x39, 0x30, 0x36, 0x35, 0x39, 0x32, 0x40400282, 0x37, 0x32, 0x32, 0x31, 0x36, 0x39, 0x36, 0x34, 0x36, 0x40400419, 0x4040007a, 0x35, 0x4040050e, 0x34, 0x40800565, 0x38, 0x40400559, 0x39, 0x37, 0x4040057b, 0x35, 0x34, 0x4040049d, 0x4040023e, 0x37, 0x4040065a, 0x38, 0x34, 0x36, 0x38, 0x31, 0x33, 0x4040008c, 0x36, 0x38, 0x33, 0x38, 0x36, 0x38, 0x39, 0x34, 0x32, 0x37, 0x37, 0x34, 0x31, 0x35, 0x35, 0x39, 0x39, 0x31, 0x38, 0x35, 0x4040005a, 0x32, 0x34, 0x35, 0x39, 0x35, 0x33, 0x39, 0x35, 0x39, 0x34, 0x33, 0x31, 0x404005b7, 0x37, 0x40400012, 0x36, 0x38, 0x30, 0x38, 0x34, 0x35, 0x404002e7, 0x37, 0x33, 0x4040081e, 0x39, 0x35, 0x38, 0x34, 0x38, 0x36, 0x35, 0x33, 0x38, 0x404006e8, 0x36, 0x32, 0x404000f2, 0x36, 0x30, 0x39, 0x404004b6, 0x36, 0x30, 0x38, 0x30, 0x35, 0x31, 0x32, 0x34, 0x33, 0x38, 0x38, 0x34, 0x4040013a, 0x4040000b, 0x34, 0x31, 0x33, 0x4040030f, 0x37, 0x36, 0x32, 0x37, 0x38, 0x40400341, 0x37, 0x31, 0x35, 0x4040059b, 0x33, 0x35, 0x39, 0x39, 0x37, 0x37, 0x30, 0x30, 0x31, 0x32, 0x39, 0x40400472, 0x38, 0x39, 0x34, 0x34, 0x31, 0x40400277, 0x36, 0x38, 0x35, 0x35, 0x4040005f, 0x34, 
0x30, 0x36, 0x33, 0x404008e6, 0x32, 0x30, 0x37, 0x32, 0x32, 0x40400158, 0x40800203, 0x34, 0x38, 0x31, 0x35, 0x38, 0x40400205, 0x404001fe, 0x4040027a, 0x40400298, 0x33, 0x39, 0x34, 0x35, 0x32, 0x32, 0x36, 0x37, 0x40c00496, 0x38, 0x4040058a, 0x32, 0x31, 0x404002ea, 0x32, 0x40400387, 0x35, 0x34, 0x36, 0x36, 0x36, 0x4040051b, 0x32, 0x33, 0x39, 0x38, 0x36, 0x34, 0x35, 0x36, 0x404004c4, 0x31, 0x36, 0x33, 0x35, 0x40800253, 0x40400811, 0x37, 0x404008ad, 0x39, 0x38, 0x4040045e, 0x39, 0x33, 0x36, 0x33, 0x34, 0x4040075b, 0x37, 0x34, 0x33, 0x32, 0x34, 0x4040047b, 0x31, 0x35, 0x30, 0x37, 0x36, 0x404004bb, 0x37, 0x39, 0x34, 0x35, 0x31, 0x30, 0x39, 0x4040003e, 0x30, 0x39, 0x34, 0x30, 0x404006a6, 0x38, 0x38, 0x37, 0x39, 0x37, 0x31, 0x30, 0x38, 0x39, 0x33, 0x404008f0, 0x36, 0x39, 0x31, 0x33, 0x36, 0x38, 0x36, 0x37, 0x32, 0x4040025b, 0x404001fe, 0x35, 0x4040053f, 0x40400468, 0x40400801, 0x31, 0x37, 0x39, 0x32, 0x38, 0x36, 0x38, 0x404008cc, 0x38, 0x37, 0x34, 0x37, 0x4080079e, 0x38, 0x32, 0x34, 0x4040097a, 0x38, 0x4040025b, 0x37, 0x31, 0x34, 0x39, 0x30, 0x39, 0x36, 0x37, 0x35, 0x39, 0x38, 0x404006ef, 0x33, 0x36, 0x35, 0x40400134, 0x38, 0x31, 0x4040005c, 0x40400745, 0x40400936, 0x36, 0x38, 0x32, 0x39, 0x4040057e, 0x38, 0x37, 0x32, 0x32, 0x36, 0x35, 0x38, 0x38, 0x30, 0x40400611, 0x35, 0x40400249, 0x34, 0x32, 0x37, 0x30, 0x34, 0x37, 0x37, 0x35, 0x35, 0x4040081e, 0x33, 0x37, 0x39, 0x36, 0x34, 0x31, 0x34, 0x35, 0x31, 0x35, 0x32, 0x404005fd, 0x32, 0x33, 0x34, 0x33, 0x36, 0x34, 0x35, 0x34, 0x404005de, 0x34, 0x34, 0x34, 0x37, 0x39, 0x35, 0x4040003c, 0x40400523, 0x408008e6, 0x34, 0x31, 0x4040052a, 0x33, 0x40400304, 0x35, 0x32, 0x33, 0x31, 0x40800841, 0x31, 0x36, 0x36, 0x31, 0x404008b2, 0x35, 0x39, 0x36, 0x39, 0x35, 0x33, 0x36, 0x32, 0x33, 0x31, 0x34, 0x404005ff, 0x32, 0x34, 0x38, 0x34, 0x39, 0x33, 0x37, 0x31, 0x38, 0x37, 0x31, 0x31, 0x30, 0x31, 0x34, 0x35, 0x37, 0x36, 0x35, 0x34, 0x40400761, 0x30, 0x32, 0x37, 0x39, 0x39, 0x33, 0x34, 0x34, 0x30, 0x33, 0x37, 0x34, 0x32, 0x30, 0x30, 0x37, 0x4040093f, 0x37, 0x38, 0x35, 0x33, 0x39, 0x30, 0x36, 0x32, 0x31, 0x39, 0x40800299, 0x40400345, 0x38, 0x34, 0x37, 0x408003d2, 0x38, 0x33, 0x33, 0x32, 0x31, 0x34, 0x34, 0x35, 0x37, 0x31, 0x40400284, 0x40400776, 0x34, 0x33, 0x35, 0x30, 0x40400928, 0x40400468, 0x35, 0x33, 0x31, 0x39, 0x31, 0x30, 0x34, 0x38, 0x34, 0x38, 0x31, 0x30, 0x30, 0x35, 0x33, 0x37, 0x30, 0x36, 0x404008bc, 0x4080059d, 0x40800781, 0x31, 0x40400559, 0x37, 0x4040031b, 0x35, 0x404007ec, 0x4040040c, 0x36, 0x33, 0x408007dc, 0x34, 0x40400971, 0x4080034e, 0x408003f5, 0x38, 0x4080052d, 0x40800887, 0x39, 0x40400187, 0x39, 0x31, 0x404008ce, 0x38, 0x31, 0x34, 0x36, 0x37, 0x35, 0x31, 0x4040062b, 0x31, 0x32, 0x33, 0x39, 0x40c001a9, 0x39, 0x30, 0x37, 0x31, 0x38, 0x36, 0x34, 0x39, 0x34, 0x32, 0x33, 0x31, 0x39, 0x36, 0x31, 0x35, 0x36, 0x404001ec, 0x404006bc, 0x39, 0x35, 0x40400926, 0x40400469, 0x4040011b, 0x36, 0x30, 0x33, 0x38, 0x40400a25, 0x4040016f, 0x40400384, 0x36, 0x32, 0x4040045a, 0x35, 0x4040084c, 0x36, 0x33, 0x38, 0x39, 0x33, 0x37, 0x37, 0x38, 0x37, 0x404008c5, 0x404000f8, 0x39, 0x37, 0x39, 0x32, 0x30, 0x37, 0x37, 0x33, 0x404005d7, 0x32, 0x31, 0x38, 0x32, 0x35, 0x36, 0x404007df, 0x36, 0x36, 0x404006d6, 0x34, 0x32, 0x4080067e, 0x36, 0x404006e6, 0x34, 0x34, 0x40400024, 0x35, 0x34, 0x39, 0x32, 0x30, 0x32, 0x36, 0x30, 0x35, 0x40400ab3, 0x408003e4, 0x32, 0x30, 0x31, 0x34, 0x39, 0x404004d2, 0x38, 0x35, 0x30, 0x37, 0x33, 0x40400599, 0x36, 0x36, 0x36, 0x30, 0x40400194, 0x32, 0x34, 0x33, 0x34, 0x30, 0x40400087, 0x30, 0x4040076b, 0x38, 0x36, 0x33, 0x40400956, 0x404007e4, 0x4040042b, 
0x40400174, 0x35, 0x37, 0x39, 0x36, 0x32, 0x36, 0x38, 0x35, 0x36, 0x40400140, 0x35, 0x30, 0x38, 0x40400523, 0x35, 0x38, 0x37, 0x39, 0x36, 0x39, 0x39, 0x40400711, 0x35, 0x37, 0x34, 0x40400a18, 0x38, 0x34, 0x30, 0x404008b3, 0x31, 0x34, 0x35, 0x39, 0x31, 0x4040078c, 0x37, 0x30, 0x40400234, 0x30, 0x31, 0x40400be7, 0x31, 0x32, 0x40400c74, 0x30, 0x404003c3, 0x33, 0x39, 0x40400b2a, 0x40400112, 0x37, 0x31, 0x35, 0x404003b0, 0x34, 0x32, 0x30, 0x40800bf2, 0x39, 0x40400bc2, 0x30, 0x37, 0x40400341, 0x40400795, 0x40400aaf, 0x40400c62, 0x32, 0x31, 0x40400960, 0x32, 0x35, 0x31, 0x4040057b, 0x40400944, 0x39, 0x32, 0x404001b2, 0x38, 0x32, 0x36, 0x40400b66, 0x32, 0x40400278, 0x33, 0x32, 0x31, 0x35, 0x37, 0x39, 0x31, 0x39, 0x38, 0x34, 0x31, 0x34, 0x4080087b, 0x39, 0x31, 0x36, 0x34, 0x408006e8, 0x39, 0x40800b58, 0x404008db, 0x37, 0x32, 0x32, 0x40400321, 0x35, 0x404008a4, 0x40400141, 0x39, 0x31, 0x30, 0x404000bc, 0x40400c5b, 0x35, 0x32, 0x38, 0x30, 0x31, 0x37, 0x40400231, 0x37, 0x31, 0x32, 0x40400914, 0x38, 0x33, 0x32, 0x40400373, 0x31, 0x40400589, 0x30, 0x39, 0x33, 0x35, 0x33, 0x39, 0x36, 0x35, 0x37, 0x4040064b, 0x31, 0x30, 0x38, 0x33, 0x40400069, 0x35, 0x31, 0x4040077a, 0x40400d5a, 0x31, 0x34, 0x34, 0x34, 0x32, 0x31, 0x30, 0x30, 0x40400202, 0x30, 0x33, 0x4040019c, 0x31, 0x31, 0x30, 0x33, 0x40400c81, 0x40400009, 0x40400026, 0x40c00602, 0x35, 0x31, 0x36, 0x404005d9, 0x40800883, 0x4040092a, 0x35, 0x40800c42, 0x38, 0x35, 0x31, 0x37, 0x31, 0x34, 0x33, 0x37, 0x40400605, 0x4040006d, 0x31, 0x35, 0x35, 0x36, 0x35, 0x30, 0x38, 0x38, 0x404003b9, 0x39, 0x38, 0x39, 0x38, 0x35, 0x39, 0x39, 0x38, 0x32, 0x33, 0x38, 0x404001cf, 0x404009ba, 0x33, 0x4040016c, 0x4040043e, 0x404009c3, 0x38, 0x40800e05, 0x33, 0x32, 0x40400107, 0x35, 0x40400305, 0x33, 0x404001ca, 0x39, 0x4040041b, 0x39, 0x38, 0x4040087d, 0x34, 0x40400cb8, 0x37, 0x4040064b, 0x30, 0x37, 0x404000e5, 0x34, 0x38, 0x31, 0x34, 0x31, 0x40400539, 0x38, 0x35, 0x39, 0x34, 0x36, 0x31, 0x40400bc9, 0x38, 0x30}, + }, + { + input: "testdata/huffman-rand-1k.in", + want: "testdata/huffman-rand-1k.%s.expect", + wantNoInput: "testdata/huffman-rand-1k.%s.expect-noinput", + tokens: []token{0xf8, 0x8b, 0x96, 0x76, 0x48, 0xd, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xd, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa, 0x64, 0xb, 0xe0, 0x23, 0x29, 0xbd, 0xf7, 0xe7, 0x83, 0x3c, 0xfb, 0xdf, 0xb3, 0xae, 0x4f, 0xa4, 0x47, 0x55, 0x99, 0xde, 0x2f, 0x96, 0x6e, 0x1c, 0x43, 0x4c, 0x87, 0xe2, 0x7c, 0xd9, 0x5f, 0x4c, 0x7c, 0xe8, 0x90, 0x3, 0xdb, 0x30, 0x95, 0xd6, 0x22, 0xc, 0x47, 0xb8, 0x4d, 0x6b, 0xbd, 0x24, 0x11, 0xab, 0x2c, 0xd7, 0xbe, 0x6e, 0x7a, 0xd6, 0x8, 0xa3, 0x98, 0xd8, 0xdd, 0x15, 0x6a, 0xfa, 0x93, 0x30, 0x1, 0x25, 0x1d, 
0xa2, 0x74, 0x86, 0x4b, 0x6a, 0x95, 0xe8, 0xe1, 0x4e, 0xe, 0x76, 0xb9, 0x49, 0xa9, 0x5f, 0xa0, 0xa6, 0x63, 0x3c, 0x7e, 0x7e, 0x20, 0x13, 0x4f, 0xbb, 0x66, 0x92, 0xb8, 0x2e, 0xa4, 0xfa, 0x48, 0xcb, 0xae, 0xb9, 0x3c, 0xaf, 0xd3, 0x1f, 0xe1, 0xd5, 0x8d, 0x42, 0x6d, 0xf0, 0xfc, 0x8c, 0xc, 0x0, 0xde, 0x40, 0xab, 0x8b, 0x47, 0x97, 0x4e, 0xa8, 0xcf, 0x8e, 0xdb, 0xa6, 0x8b, 0x20, 0x9, 0x84, 0x7a, 0x66, 0xe5, 0x98, 0x29, 0x2, 0x95, 0xe6, 0x38, 0x32, 0x60, 0x3, 0xe3, 0x9a, 0x1e, 0x54, 0xe8, 0x63, 0x80, 0x48, 0x9c, 0xe7, 0x63, 0x33, 0x6e, 0xa0, 0x65, 0x83, 0xfa, 0xc6, 0xba, 0x7a, 0x43, 0x71, 0x5, 0xf5, 0x68, 0x69, 0x85, 0x9c, 0xba, 0x45, 0xcd, 0x6b, 0xb, 0x19, 0xd1, 0xbb, 0x7f, 0x70, 0x85, 0x92, 0xd1, 0xb4, 0x64, 0x82, 0xb1, 0xe4, 0x62, 0xc5, 0x3c, 0x46, 0x1f, 0x92, 0x31, 0x1c, 0x4e, 0x41, 0x77, 0xf7, 0xe7, 0x87, 0xa2, 0xf, 0x6e, 0xe8, 0x92, 0x3, 0x6b, 0xa, 0xe7, 0xa9, 0x3b, 0x11, 0xda, 0x66, 0x8a, 0x29, 0xda, 0x79, 0xe1, 0x64, 0x8d, 0xe3, 0x54, 0xd4, 0xf5, 0xef, 0x64, 0x87, 0x3b, 0xf4, 0xc2, 0xf4, 0x71, 0x13, 0xa9, 0xe9, 0xe0, 0xa2, 0x6, 0x14, 0xab, 0x5d, 0xa7, 0x96, 0x0, 0xd6, 0xc3, 0xcc, 0x57, 0xed, 0x39, 0x6a, 0x25, 0xcd, 0x76, 0xea, 0xba, 0x3a, 0xf2, 0xa1, 0x95, 0x5d, 0xe5, 0x71, 0xcf, 0x9c, 0x62, 0x9e, 0x6a, 0xfa, 0xd5, 0x31, 0xd1, 0xa8, 0x66, 0x30, 0x33, 0xaa, 0x51, 0x17, 0x13, 0x82, 0x99, 0xc8, 0x14, 0x60, 0x9f, 0x4d, 0x32, 0x6d, 0xda, 0x19, 0x26, 0x21, 0xdc, 0x7e, 0x2e, 0x25, 0x67, 0x72, 0xca, 0xf, 0x92, 0xcd, 0xf6, 0xd6, 0xcb, 0x97, 0x8a, 0x33, 0x58, 0x73, 0x70, 0x91, 0x1d, 0xbf, 0x28, 0x23, 0xa3, 0xc, 0xf1, 0x83, 0xc3, 0xc8, 0x56, 0x77, 0x68, 0xe3, 0x82, 0xba, 0xb9, 0x57, 0x56, 0x57, 0x9c, 0xc3, 0xd6, 0x14, 0x5, 0x3c, 0xb1, 0xaf, 0x93, 0xc8, 0x8a, 0x57, 0x7f, 0x53, 0xfa, 0x2f, 0xaa, 0x6e, 0x66, 0x83, 0xfa, 0x33, 0xd1, 0x21, 0xab, 0x1b, 0x71, 0xb4, 0x7c, 0xda, 0xfd, 0xfb, 0x7f, 0x20, 0xab, 0x5e, 0xd5, 0xca, 0xfd, 0xdd, 0xe0, 0xee, 0xda, 0xba, 0xa8, 0x27, 0x99, 0x97, 0x69, 0xc1, 0x3c, 0x82, 0x8c, 0xa, 0x5c, 0x2d, 0x5b, 0x88, 0x3e, 0x34, 0x35, 0x86, 0x37, 0x46, 0x79, 0xe1, 0xaa, 0x19, 0xfb, 0xaa, 0xde, 0x15, 0x9, 0xd, 0x1a, 0x57, 0xff, 0xb5, 0xf, 0xf3, 0x2b, 0x5a, 0x6a, 0x4d, 0x19, 0x77, 0x71, 0x45, 0xdf, 0x4f, 0xb3, 0xec, 0xf1, 0xeb, 0x18, 0x53, 0x3e, 0x3b, 0x47, 0x8, 0x9a, 0x73, 0xa0, 0x5c, 0x8c, 0x5f, 0xeb, 0xf, 0x3a, 0xc2, 0x43, 0x67, 0xb4, 0x66, 0x67, 0x80, 0x58, 0xe, 0xc1, 0xec, 0x40, 0xd4, 0x22, 0x94, 0xca, 0xf9, 0xe8, 0x92, 0xe4, 0x69, 0x38, 0xbe, 0x67, 0x64, 0xca, 0x50, 0xc7, 0x6, 0x67, 0x42, 0x6e, 0xa3, 0xf0, 0xb7, 0x6c, 0xf2, 0xe8, 0x5f, 0xb1, 0xaf, 0xe7, 0xdb, 0xbb, 0x77, 0xb5, 0xf8, 0xcb, 0x8, 0xc4, 0x75, 0x7e, 0xc0, 0xf9, 0x1c, 0x7f, 0x3c, 0x89, 0x2f, 0xd2, 0x58, 0x3a, 0xe2, 0xf8, 0x91, 0xb6, 0x7b, 0x24, 0x27, 0xe9, 0xae, 0x84, 0x8b, 0xde, 0x74, 0xac, 0xfd, 0xd9, 0xb7, 0x69, 0x2a, 0xec, 0x32, 0x6f, 0xf0, 0x92, 0x84, 0xf1, 0x40, 0xc, 0x8a, 0xbc, 0x39, 0x6e, 0x2e, 0x73, 0xd4, 0x6e, 0x8a, 0x74, 0x2a, 0xdc, 0x60, 0x1f, 0xa3, 0x7, 0xde, 0x75, 0x8b, 0x74, 0xc8, 0xfe, 0x63, 0x75, 0xf6, 0x3d, 0x63, 0xac, 0x33, 0x89, 0xc3, 0xf0, 0xf8, 0x2d, 0x6b, 0xb4, 0x9e, 0x74, 0x8b, 0x5c, 0x33, 0xb4, 0xca, 0xa8, 0xe4, 0x99, 0xb6, 0x90, 0xa1, 0xef, 0xf, 0xd3, 0x61, 0xb2, 0xc6, 0x1a, 0x94, 0x7c, 0x44, 0x55, 0xf4, 0x45, 0xff, 0x9e, 0xa5, 0x5a, 0xc6, 0xa0, 0xe8, 0x2a, 0xc1, 0x8d, 0x6f, 0x34, 0x11, 0xb9, 0xbe, 0x4e, 0xd9, 0x87, 0x97, 0x73, 0xcf, 0x3d, 0x23, 0xae, 0xd5, 0x1a, 0x5e, 0xae, 0x5d, 0x6a, 0x3, 0xf9, 0x22, 0xd, 0x10, 0xd9, 0x47, 0x69, 0x15, 0x3f, 0xee, 0x52, 0xa3, 0x8, 0xd2, 0x3c, 0x51, 0xf4, 0xf8, 0x9d, 0xe4, 0x98, 0x89, 0xc8, 0x67, 0x39, 0xd5, 0x5e, 0x35, 0x78, 0x27, 0xe8, 0x3c, 0x80, 0xae, 
0x79, 0x71, 0xd2, 0x93, 0xf4, 0xaa, 0x51, 0x12, 0x1c, 0x4b, 0x1b, 0xe5, 0x6e, 0x15, 0x6f, 0xe4, 0xbb, 0x51, 0x9b, 0x45, 0x9f, 0xf9, 0xc4, 0x8c, 0x2a, 0xfb, 0x1a, 0xdf, 0x55, 0xd3, 0x48, 0x93, 0x27, 0x1, 0x26, 0xc2, 0x6b, 0x55, 0x6d, 0xa2, 0xfb, 0x84, 0x8b, 0xc9, 0x9e, 0x28, 0xc2, 0xef, 0x1a, 0x24, 0xec, 0x9b, 0xae, 0xbd, 0x60, 0xe9, 0x15, 0x35, 0xee, 0x42, 0xa4, 0x33, 0x5b, 0xfa, 0xf, 0xb6, 0xf7, 0x1, 0xa6, 0x2, 0x4c, 0xca, 0x90, 0x58, 0x3a, 0x96, 0x41, 0xe7, 0xcb, 0x9, 0x8c, 0xdb, 0x85, 0x4d, 0xa8, 0x89, 0xf3, 0xb5, 0x8e, 0xfd, 0x75, 0x5b, 0x4f, 0xed, 0xde, 0x3f, 0xeb, 0x38, 0xa3, 0xbe, 0xb0, 0x73, 0xfc, 0xb8, 0x54, 0xf7, 0x4c, 0x30, 0x67, 0x2e, 0x38, 0xa2, 0x54, 0x18, 0xba, 0x8, 0xbf, 0xf2, 0x39, 0xd5, 0xfe, 0xa5, 0x41, 0xc6, 0x66, 0x66, 0xba, 0x81, 0xef, 0x67, 0xe4, 0xe6, 0x3c, 0xc, 0xca, 0xa4, 0xa, 0x79, 0xb3, 0x57, 0x8b, 0x8a, 0x75, 0x98, 0x18, 0x42, 0x2f, 0x29, 0xa3, 0x82, 0xef, 0x9f, 0x86, 0x6, 0x23, 0xe1, 0x75, 0xfa, 0x8, 0xb1, 0xde, 0x17, 0x4a}, + }, + { + input: "testdata/huffman-rand-limit.in", + want: "testdata/huffman-rand-limit.%s.expect", + wantNoInput: "testdata/huffman-rand-limit.%s.expect-noinput", + tokens: []token{0x61, 0x51c00000, 0xa, 0xf8, 0x8b, 0x96, 0x76, 0x48, 0xa, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xa, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa}, + }, + { + input: "testdata/huffman-shifts.in", + want: "testdata/huffman-shifts.%s.expect", + wantNoInput: "testdata/huffman-shifts.%s.expect-noinput", + tokens: []token{0x31, 0x30, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x52400001, 0xd, 0xa, 0x32, 0x33, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7f400001}, + }, + { + input: "testdata/huffman-text-shift.in", + want: "testdata/huffman-text-shift.%s.expect", + wantNoInput: "testdata/huffman-text-shift.%s.expect-noinput", + tokens: []token{0x2f, 0x2f, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x32, 0x30, 0x30, 0x39, 0x54, 0x68, 0x47, 0x6f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x6c, 0x40800016, 0x72, 0x72, 0x76, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x55, 0x6f, 0x66, 0x74, 0x68, 0x69, 0x6f, 0x75, 0x72, 0x63, 0x63, 0x6f, 0x64, 0x69, 0x67, 0x6f, 0x76, 0x72, 0x6e, 0x64, 0x62, 0x79, 0x42, 0x53, 0x44, 0x2d, 0x74, 0x79, 0x6c, 0x40400020, 0x6c, 0x69, 0x63, 0x6e, 0x74, 0x68, 0x74, 0x63, 0x6e, 0x62, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x74, 0x68, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x66, 0x69, 0x6c, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x63, 0x6b, 0x67, 0x6d, 0x69, 
0x6e, 0x4040000a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x6f, 0x22, 0x4040000c, 0x66, 0x75, 0x6e, 0x63, 0x6d, 0x69, 0x6e, 0x28, 0x29, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x72, 0x62, 0x3d, 0x6d, 0x6b, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x2c, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x5f, 0x3a, 0x3d, 0x6f, 0x2e, 0x43, 0x72, 0x74, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x40800021, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x58, 0x78, 0x79, 0x7a, 0x21, 0x22, 0x23, 0xc2, 0xa4, 0x25, 0x26, 0x2f, 0x3f, 0x22}, + }, + { + input: "testdata/huffman-text.in", + want: "testdata/huffman-text.%s.expect", + wantNoInput: "testdata/huffman-text.%s.expect-noinput", + tokens: []token{0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x32, 0x30, 0x30, 0x39, 0x20, 0x54, 0x68, 0x65, 0x20, 0x47, 0x6f, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x73, 0x2e, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x4080001e, 0x73, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x20, 0x55, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x73, 0x20, 0x67, 0x6f, 0x76, 0x65, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x61, 0x20, 0x42, 0x53, 0x44, 0x2d, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x40800036, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x20, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x20, 0x6d, 0x61, 0x69, 0x6e, 0x4040000f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x22, 0x6f, 0x73, 0x22, 0x4040000e, 0x66, 0x75, 0x6e, 0x63, 0x4080001b, 0x28, 0x29, 0x20, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x61, 0x72, 0x20, 0x62, 0x20, 0x3d, 0x20, 0x6d, 0x61, 0x6b, 0x65, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x65, 0x2c, 0x20, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x20, 0x5f, 0x20, 0x3a, 0x3d, 0x20, 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x61, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x61, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x4080002a, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa}, + }, + { + input: "testdata/huffman-zero.in", + want: "testdata/huffman-zero.%s.expect", + wantNoInput: "testdata/huffman-zero.%s.expect-noinput", + tokens: []token{0x30, ml, 0x4b800000}, + }, + { + input: "", + want: "", + wantNoInput: "testdata/null-long-match.%s.expect-noinput", + tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 
ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x41400000}, + }, +} + +// TestWriteBlock tests if the writeBlock encoding has changed. +// To update the reference files use the "-update" flag on the test. +func TestWriteBlock(t *testing.T) { + for _, test := range writeBlockTests { + testBlock(t, test, "wb") + } +} + +// TestWriteBlockDynamic tests if the writeBlockDynamic encoding has changed. +// To update the reference files use the "-update" flag on the test. +func TestWriteBlockDynamic(t *testing.T) { + for _, test := range writeBlockTests { + testBlock(t, test, "dyn") + } +} + +// testBlock tests a block against its references, +// or regenerate the references, if "-update" flag is set. 
+func testBlock(t *testing.T, test huffTest, ttype string) { + if test.want != "" { + test.want = fmt.Sprintf(test.want, ttype) + } + test.wantNoInput = fmt.Sprintf(test.wantNoInput, ttype) + if *update { + if test.input != "" { + t.Logf("Updating %q", test.want) + input, err := ioutil.ReadFile(test.input) + if err != nil { + t.Error(err) + return + } + + f, err := os.Create(test.want) + if err != nil { + t.Error(err) + return + } + defer f.Close() + bw := newHuffmanBitWriter(f) + writeToType(t, ttype, bw, test.tokens, input) + } + + t.Logf("Updating %q", test.wantNoInput) + f, err := os.Create(test.wantNoInput) + if err != nil { + t.Error(err) + return + } + defer f.Close() + bw := newHuffmanBitWriter(f) + writeToType(t, ttype, bw, test.tokens, nil) + return + } + + if test.input != "" { + t.Logf("Testing %q", test.want) + input, err := ioutil.ReadFile(test.input) + if err != nil { + t.Error(err) + return + } + want, err := ioutil.ReadFile(test.want) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + writeToType(t, ttype, bw, test.tokens, input) + + got := buf.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+".got") + if err := ioutil.WriteFile(test.want+".got", got, 0666); err != nil { + t.Error(err) + } + } + t.Log("Output ok") + + // Test if the writer produces the same output after reset. + buf.Reset() + bw.reset(&buf) + writeToType(t, ttype, bw, test.tokens, input) + bw.flush() + got = buf.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("reset: writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+".reset.got") + if err := ioutil.WriteFile(test.want+".reset.got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Reset ok") + testWriterEOF(t, "wb", test, true) + } + t.Logf("Testing %q", test.wantNoInput) + wantNI, err := ioutil.ReadFile(test.wantNoInput) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + writeToType(t, ttype, bw, test.tokens, nil) + + got := buf.Bytes() + if !bytes.Equal(got, wantNI) { + t.Errorf("writeBlock did not yield expected result for file %q with input. See %q", test.wantNoInput, test.wantNoInput+".got") + if err := ioutil.WriteFile(test.want+".got", got, 0666); err != nil { + t.Error(err) + } + } else if got[0]&1 == 1 { + t.Error("got unexpected EOF") + return + } + + t.Log("Output ok") + + // Test if the writer produces the same output after reset. + buf.Reset() + bw.reset(&buf) + writeToType(t, ttype, bw, test.tokens, nil) + bw.flush() + got = buf.Bytes() + if !bytes.Equal(got, wantNI) { + t.Errorf("reset: writeBlock did not yield expected result for file %q without input. See %q", test.want, test.want+".reset.got") + if err := ioutil.WriteFile(test.want+".reset.got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Reset ok") + testWriterEOF(t, "wb", test, false) +} + +func writeToType(t *testing.T, ttype string, bw *huffmanBitWriter, tok []token, input []byte) { + switch ttype { + case "wb": + bw.writeBlock(tok, false, input) + case "dyn": + bw.writeBlockDynamic(tok, false, input) + default: + panic("unknown test type") + } + + if bw.err != nil { + t.Error(bw.err) + return + } + + bw.flush() + if bw.err != nil { + t.Error(bw.err) + return + } +} + +// testWriterEOF tests if the written block contains an EOF marker. 
+func testWriterEOF(t *testing.T, ttype string, test huffTest, useInput bool) { + if useInput && test.input == "" { + return + } + var input []byte + if useInput { + var err error + input, err = ioutil.ReadFile(test.input) + if err != nil { + t.Error(err) + return + } + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + switch ttype { + case "wb": + bw.writeBlock(test.tokens, true, input) + case "dyn": + bw.writeBlockDynamic(test.tokens, true, input) + case "huff": + bw.writeBlockHuff(true, input) + default: + panic("unknown test type") + } + if bw.err != nil { + t.Error(bw.err) + return + } + + bw.flush() + if bw.err != nil { + t.Error(bw.err) + return + } + b := buf.Bytes() + if len(b) == 0 { + t.Error("no output received") + return + } + if b[0]&1 != 1 { + t.Errorf("block not marked with EOF for input %q", test.input) + return + } + t.Log("EOF ok") +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go new file mode 100644 index 00000000000..bdcbd823b00 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "math" + "sort" +) + +// hcode is a huffman code with a bit code and bit length. +type hcode struct { + code, len uint16 +} + +type huffmanEncoder struct { + codes []hcode + freqcache []literalNode + bitCount [17]int32 + lns byLiteral // stored to avoid repeated allocation in generate + lfs byFreq // stored to avoid repeated allocation in generate +} + +type literalNode struct { + literal uint16 + freq int32 +} + +// A levelInfo describes the state of the constructed tree for a given depth. +type levelInfo struct { + // Our level. for better printing + level int32 + + // The frequency of the last node at this level + lastFreq int32 + + // The frequency of the next character to add to this level + nextCharFreq int32 + + // The frequency of the next pair (from level below) to add to this level. + // Only valid if the "needed" value of the next lower level is 0. + nextPairFreq int32 + + // The number of chains remaining to generate for this level before moving + // up to the next level + needed int32 +} + +// set sets the code and length of an hcode. +func (h *hcode) set(code uint16, length uint16) { + h.len = length + h.code = code +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } + +func newHuffmanEncoder(size int) *huffmanEncoder { + return &huffmanEncoder{codes: make([]hcode, size)} +} + +// Generates a HuffmanCode corresponding to the fixed literal table +func generateFixedLiteralEncoding() *huffmanEncoder { + h := newHuffmanEncoder(maxNumLit) + codes := h.codes + var ch uint16 + for ch = 0; ch < maxNumLit; ch++ { + var bits uint16 + var size uint16 + switch { + case ch < 144: + // size 8, 000110000 .. 10111111 + bits = ch + 48 + size = 8 + break + case ch < 256: + // size 9, 110010000 .. 111111111 + bits = ch + 400 - 144 + size = 9 + break + case ch < 280: + // size 7, 0000000 .. 0010111 + bits = ch - 256 + size = 7 + break + default: + // size 8, 11000000 .. 
11000111 + bits = ch + 192 - 280 + size = 8 + } + codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + } + return h +} + +func generateFixedOffsetEncoding() *huffmanEncoder { + h := newHuffmanEncoder(30) + codes := h.codes + for ch := range codes { + codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} + } + return h +} + +var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() +var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() + +func (h *huffmanEncoder) bitLength(freq []int32) int { + var total int + for i, f := range freq { + if f != 0 { + total += int(f) * int(h.codes[i].len) + } + } + return total +} + +const maxBitsLimit = 16 + +// Return the number of literals assigned to each bit size in the Huffman encoding +// +// This method is only called when list.length >= 3 +// The cases of 0, 1, and 2 literals are handled by special case code. +// +// list An array of the literals with non-zero frequencies +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// maxBits The maximum number of bits that should be used to encode any literal. +// Must be less than 16. +// return An integer array in which array[i] indicates the number of literals +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: list[1].freq, + nextCharFreq: list[2].freq, + nextPairFreq: list[0].freq + list[1].freq, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := maxBits + for { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. 
+ leafCounts[level][level] = n + l.nextCharFreq = list[n].freq + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + h.lns.sort(chunk) + for _, node := range chunk { + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { + if h.freqcache == nil { + // Allocate a reusable buffer with the longest possible frequency table. + // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit. + // The largest of these is maxNumLit, so we allocate for that case. + h.freqcache = make([]literalNode, maxNumLit+1) + } + list := h.freqcache[:len(freq)+1] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + list[count] = literalNode{} + h.codes[i].len = 0 + } + } + list[len(freq)] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. 
+ h.codes[node.literal].set(uint16(i), 1) + } + return + } + h.lfs.sort(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +type byLiteral []literalNode + +func (s *byLiteral) sort(a []literalNode) { + *s = byLiteral(a) + sort.Sort(s) +} + +func (s byLiteral) Len() int { return len(s) } + +func (s byLiteral) Less(i, j int) bool { + return s[i].literal < s[j].literal +} + +func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type byFreq []literalNode + +func (s *byFreq) sort(a []literalNode) { + *s = byFreq(a) + sort.Sort(s) +} + +func (s byFreq) Len() int { return len(s) } + +func (s byFreq) Less(i, j int) bool { + if s[i].freq == s[j].freq { + return s[i].literal < s[j].literal + } + return s[i].freq < s[j].freq +} + +func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 00000000000..800d0ce9e54 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,880 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. +package flate + +import ( + "bufio" + "io" + "math/bits" + "strconv" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code +) + +// Initialize the fixedHuffmanDecoder only once upon first use. +var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10) +} + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Read +} + +func (e *ReadError) Error() string { + return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Write +} + +func (e *WriteError) Error() string { + return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. 
+type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. +// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks *[huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(lengths []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.chunks == nil { + h.chunks = &[huffmanNumChunks]uint32{} + } + if h.min != 0 { + *h = huffmanDecoder{chunks: h.chunks, links: h.links} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range lengths { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n&maxCodeLenMask]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i&maxCodeLenMask] = code + code += count[i&maxCodeLenMask] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
+	if code != 1<<uint(max) {
+		return false
+	}
+
+	h.min = min
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint32, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(bits.Reverse16(uint16(j)))
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint32, numLinks)
+		}
+	}
+
+	for i, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint32(i<<huffmanValueShift | n)
+		reverse := int(bits.Reverse16(uint16(code)))
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// Decompress state.
+type decompressor struct {
+	// Input source.
+	r       Reader
+	roffset int64
+
+	// Input bits, in top of b.
+	b  uint32
+	nb uint
+
+	// Huffman decoders for literal/length, distance.
+	h1, h2 huffmanDecoder
+
+	// Length arrays used to define Huffman codes.
+	bits     *[maxNumLit + maxNumDist]int
+	codebits *[numCodes]int
+
+	// Output history, buffer.
+	dict dictDecoder
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Next step in the decompression,
+	// and decompression state.
+ step func(*decompressor) + stepState int + final bool + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlock() + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlock() + default: + // 3 is reserved. + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + f.step(f) + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.step(f) + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. 
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<nb-1))
+		f.b >>= nb
+		f.nb -= nb
+		if i+rep > n {
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can initialize the min bits to read at a time
+	// for the HLIT tree to the length of the EOB marker since we know that
+	// every block must terminate with one. This preserves the property that
+	// we never read any extra bytes after the end of the DEFLATE stream.
+	if f.h1.min < f.bits[endBlockMarker] {
+		f.h1.min = f.bits[endBlockMarker]
+	}
+
+	return nil
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBlock() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		v, err := f.huffSym(f.hl)
+		if err != nil {
+			f.err = err
+			return
+		}
+		var n uint // number of bits extra
+		var length int
+		switch {
+		case v < 256:
+			f.dict.writeByte(byte(v))
+			if f.dict.availWrite() == 0 {
+				f.toRead = f.dict.readFlush()
+				f.step = (*decompressor).huffmanBlock
+				f.stepState = stateInit
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+			n = 0
+		case v < 269:
+			length = v*2 - (265*2 - 11)
+			n = 1
+		case v < 273:
+			length = v*4 - (269*4 - 19)
+			n = 2
+		case v < 277:
+			length = v*8 - (273*8 - 35)
+			n = 3
+		case v < 281:
+			length = v*16 - (277*16 - 67)
+			n = 4
+		case v < 285:
+			length = v*32 - (281*32 - 131)
+			n = 5
+		case v < maxNumLit:
+			length = 258
+			n = 0
+		default:
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+		if n > 0 {
+			for f.nb < n {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			length += int(f.b & uint32(1<<n-1))
+			f.b >>= n
+			f.nb -= n
+		}
+
+		var dist int
+		if f.hd == nil {
+			for f.nb < 5 {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+			f.b >>= 5
+			f.nb -= 5
+		} else {
+			if dist, err = f.huffSym(f.hd); err != nil {
+				f.err = err
+				return
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << nb
+			for f.nb < nb {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			extra |= int(f.b & uint32(1<<nb-1))
+			f.b >>= nb
+			f.nb -= nb
+			dist = 1<<(nb+1) + 1 + extra
+		default:
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+ if dist > f.dict.histSize() { + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBlock // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Copy a single uncompressed data block from input to output. +func (f *decompressor) dataBlock() { + // Uncompressed. + // Discard current half-byte. + f.nb = 0 + f.b = 0 + + // Length then ones-complement of length. + nr, err := io.ReadFull(f.r, f.buf[0:4]) + f.roffset += int64(nr) + if err != nil { + f.err = noEOF(err) + return + } + n := int(f.buf[0]) | int(f.buf[1])<<8 + nn := int(f.buf[2]) | int(f.buf[3])<<8 + if uint16(nn) != uint16(^n) { + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + f.toRead = f.dict.readFlush() + f.finishBlock() + return + } + + f.copyLen = n + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. +func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + f.err = noEOF(err) + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = (*decompressor).nextBlock +} + +// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. +func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.min) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & 31) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & 31) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. + var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: (*decompressor).nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_test.go b/vendor/github.com/klauspost/compress/flate/inflate_test.go new file mode 100644 index 00000000000..8402c0c5299 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_test.go @@ -0,0 +1,282 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "bytes" + "crypto/rand" + "io" + "io/ioutil" + "strconv" + "strings" + "testing" +) + +func TestReset(t *testing.T) { + ss := []string{ + "lorem ipsum izzle fo rizzle", + "the quick brown fox jumped over", + } + + deflated := make([]bytes.Buffer, 2) + for i, s := range ss { + w, _ := NewWriter(&deflated[i], 1) + w.Write([]byte(s)) + w.Close() + } + + inflated := make([]bytes.Buffer, 2) + + f := NewReader(&deflated[0]) + io.Copy(&inflated[0], f) + f.(Resetter).Reset(&deflated[1], nil) + io.Copy(&inflated[1], f) + f.Close() + + for i, s := range ss { + if s != inflated[i].String() { + t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s) + } + } +} + +func TestReaderTruncated(t *testing.T) { + vectors := []struct{ input, output string }{ + {"\x00", ""}, + {"\x00\f", ""}, + {"\x00\f\x00", ""}, + {"\x00\f\x00\xf3\xff", ""}, + {"\x00\f\x00\xf3\xffhello", "hello"}, + {"\x00\f\x00\xf3\xffhello, world", "hello, world"}, + {"\x02", ""}, + {"\xf2H\xcd", "He"}, + {"\xf2H͙0a\u0084\t", "Hel\x90\x90\x90\x90\x90"}, + {"\xf2H͙0a\u0084\t\x00", "Hel\x90\x90\x90\x90\x90"}, + } + + for i, v := range vectors { + r := strings.NewReader(v.input) + zr := NewReader(r) + b, err := ioutil.ReadAll(zr) + if err != io.ErrUnexpectedEOF { + t.Errorf("test %d, error mismatch: got %v, want io.ErrUnexpectedEOF", i, err) + } + if string(b) != v.output { + t.Errorf("test %d, output mismatch: got %q, want %q", i, b, v.output) + } + } +} + +func TestResetDict(t *testing.T) { + dict := []byte("the lorem fox") + ss := []string{ + "lorem ipsum izzle fo rizzle", + "the quick brown fox jumped over", + } + + deflated := make([]bytes.Buffer, len(ss)) + for i, s := range ss { + w, _ := NewWriterDict(&deflated[i], DefaultCompression, dict) + w.Write([]byte(s)) + w.Close() + } + + inflated := make([]bytes.Buffer, len(ss)) + + f := NewReader(nil) + for i := range inflated { + f.(Resetter).Reset(&deflated[i], dict) + io.Copy(&inflated[i], f) + } + f.Close() + + for i, s := range ss { + if s != inflated[i].String() { + t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s) + } + } +} + +// Tests ported from zlib/test/infcover.c +type infTest struct { + hex string + id string + n int +} + +var infTests = []infTest{ + {"0 0 0 0 0", "invalid stored block lengths", 1}, + {"3 0", "fixed", 0}, + {"6", "invalid block type", 1}, + {"1 1 0 fe ff 0", "stored", 0}, + {"fc 0 0", "too many length or distance symbols", 1}, + {"4 0 fe ff", "invalid code lengths set", 1}, + {"4 0 24 49 0", "invalid bit length repeat", 1}, + {"4 0 24 e9 ff ff", "invalid bit length repeat", 1}, + {"4 0 24 e9 ff 6d", "invalid code -- missing end-of-block", 1}, + {"4 80 49 92 24 49 92 24 71 ff ff 93 11 0", "invalid literal/lengths set", 1}, + {"4 80 49 92 24 49 92 24 f b4 ff ff c3 84", "invalid distances set", 1}, + {"4 c0 81 8 0 0 0 0 20 7f eb b 0 0", "invalid literal/length code", 1}, + {"2 7e ff ff", "invalid distance code", 1}, + {"c c0 81 0 0 0 0 0 90 ff 6b 4 0", "invalid distance too far back", 1}, + + // also trailer mismatch just in inflate() + {"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 1", "incorrect data check", -1}, + {"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 1", "incorrect length check", -1}, + {"5 c0 21 d 0 0 0 80 b0 fe 6d 2f 91 6c", "pull 17", 0}, + {"5 e0 81 91 24 cb b2 2c 49 e2 f 2e 8b 9a 47 56 9f fb fe ec d2 ff 1f", "long code", 0}, + {"ed c0 1 1 0 0 0 40 20 ff 57 1b 42 2c 4f", "length extra", 0}, + {"ed cf c1 b1 2c 47 10 c4 30 fa 6f 35 1d 1 82 59 3d fb be 2e 2a fc f c", "long distance and extra", 0}, + {"ed c0 81 0 0 0 0 
80 a0 fd a9 17 a9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6", "window end", 0}, +} + +func TestInflate(t *testing.T) { + for _, test := range infTests { + hex := strings.Split(test.hex, " ") + data := make([]byte, len(hex)) + for i, h := range hex { + b, _ := strconv.ParseInt(h, 16, 32) + data[i] = byte(b) + } + buf := bytes.NewReader(data) + r := NewReader(buf) + + _, err := io.Copy(ioutil.Discard, r) + if (test.n == 0 && err == nil) || (test.n != 0 && err != nil) { + t.Logf("%q: OK:", test.id) + t.Logf(" - got %v", err) + continue + } + + if test.n == 0 && err != nil { + t.Errorf("%q: Expected no error, but got %v", test.id, err) + continue + } + + if test.n != 0 && err == nil { + t.Errorf("%q:Expected an error, but got none", test.id) + continue + } + t.Fatal(test.n, err) + } + + for _, test := range infOutTests { + hex := strings.Split(test.hex, " ") + data := make([]byte, len(hex)) + for i, h := range hex { + b, _ := strconv.ParseInt(h, 16, 32) + data[i] = byte(b) + } + buf := bytes.NewReader(data) + r := NewReader(buf) + + _, err := io.Copy(ioutil.Discard, r) + if test.err == (err != nil) { + t.Logf("%q: OK:", test.id) + t.Logf(" - got %v", err) + continue + } + + if test.err == false && err != nil { + t.Errorf("%q: Expected no error, but got %v", test.id, err) + continue + } + + if test.err && err == nil { + t.Errorf("%q: Expected an error, but got none", test.id) + continue + } + t.Fatal(test.err, err) + } + +} + +// Tests ported from zlib/test/infcover.c +// Since zlib inflate is push (writer) instead of pull (reader) +// some of the window size tests have been removed, since they +// are irrelevant. +type infOutTest struct { + hex string + id string + step int + win int + length int + err bool +} + +var infOutTests = []infOutTest{ + {"2 8 20 80 0 3 0", "inflate_fast TYPE return", 0, -15, 258, false}, + {"63 18 5 40 c 0", "window wrap", 3, -8, 300, false}, + {"e5 e0 81 ad 6d cb b2 2c c9 01 1e 59 63 ae 7d ee fb 4d fd b5 35 41 68 ff 7f 0f 0 0 0", "fast length extra bits", 0, -8, 258, true}, + {"25 fd 81 b5 6d 59 b6 6a 49 ea af 35 6 34 eb 8c b9 f6 b9 1e ef 67 49 50 fe ff ff 3f 0 0", "fast distance extra bits", 0, -8, 258, true}, + {"3 7e 0 0 0 0 0", "fast invalid distance code", 0, -8, 258, true}, + {"1b 7 0 0 0 0 0", "fast invalid literal/length code", 0, -8, 258, true}, + {"d c7 1 ae eb 38 c 4 41 a0 87 72 de df fb 1f b8 36 b1 38 5d ff ff 0", "fast 2nd level codes and too far back", 0, -8, 258, true}, + {"63 18 5 8c 10 8 0 0 0 0", "very common case", 0, -8, 259, false}, + {"63 60 60 18 c9 0 8 18 18 18 26 c0 28 0 29 0 0 0", "contiguous and wrap around window", 6, -8, 259, false}, + {"63 0 3 0 0 0 0 0", "copy direct from output", 0, -8, 259, false}, + {"1f 8b 0 0", "bad gzip method", 0, 31, 0, true}, + {"1f 8b 8 80", "bad gzip flags", 0, 31, 0, true}, + {"77 85", "bad zlib method", 0, 15, 0, true}, + {"78 9c", "bad zlib window size", 0, 8, 0, true}, + {"1f 8b 8 1e 0 0 0 0 0 0 1 0 0 0 0 0 0", "bad header crc", 0, 47, 1, true}, + {"1f 8b 8 2 0 0 0 0 0 0 1d 26 3 0 0 0 0 0 0 0 0 0", "check gzip length", 0, 47, 0, true}, + {"78 90", "bad zlib header check", 0, 47, 0, true}, + {"8 b8 0 0 0 1", "need dictionary", 0, 8, 0, true}, + {"63 18 68 30 d0 0 0", "force split window update", 4, -8, 259, false}, + {"3 0", "use fixed blocks", 0, -15, 1, false}, + {"", "bad window size", 0, 1, 0, true}, +} + +func TestWriteTo(t *testing.T) { + input := make([]byte, 100000) + n, err := rand.Read(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not 
fill buffer") + } + compressed := &bytes.Buffer{} + w, err := NewWriter(compressed, -2) + if err != nil { + t.Fatal(err) + } + n, err = w.Write(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + w.Close() + buf := compressed.Bytes() + + dec := NewReader(bytes.NewBuffer(buf)) + // ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure. + readall, err := ioutil.ReadAll(ioutil.NopCloser(dec)) + if err != nil { + t.Fatal(err) + } + if len(readall) != len(input) { + t.Fatal("did not decompress everything") + } + + dec = NewReader(bytes.NewBuffer(buf)) + wtbuf := &bytes.Buffer{} + written, err := dec.(io.WriterTo).WriteTo(wtbuf) + if err != nil { + t.Fatal(err) + } + if written != int64(len(input)) { + t.Error("Returned length did not match, expected", len(input), "got", written) + } + if wtbuf.Len() != len(input) { + t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len()) + } + if bytes.Compare(wtbuf.Bytes(), input) != 0 { + t.Fatal("output did not match input") + } +} diff --git a/vendor/github.com/klauspost/compress/flate/reader_test.go b/vendor/github.com/klauspost/compress/flate/reader_test.go new file mode 100644 index 00000000000..55439646d74 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reader_test.go @@ -0,0 +1,106 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "io" + "io/ioutil" + "runtime" + "strings" + "testing" +) + +func TestNlitOutOfRange(t *testing.T) { + // Trying to decode this bogus flate data, which has a Huffman table + // with nlit=288, should not panic. + io.Copy(ioutil.Discard, NewReader(strings.NewReader( + "\xfc\xfe\x36\xe7\x5e\x1c\xef\xb3\x55\x58\x77\xb6\x56\xb5\x43\xf4"+ + "\x6f\xf2\xd2\xe6\x3d\x99\xa0\x85\x8c\x48\xeb\xf8\xda\x83\x04\x2a"+ + "\x75\xc4\xf8\x0f\x12\x11\xb9\xb4\x4b\x09\xa0\xbe\x8b\x91\x4c"))) +} + +const ( + digits = iota + twain + random +) + +var testfiles = []string{ + // Digits is the digits of the irrational number e. Its decimal representation + // does not repeat, but there are only 10 possible digits, so it should be + // reasonably compressible. + digits: "../testdata/e.txt", + // Twain is Project Gutenberg's edition of Mark Twain's classic English novel. + twain: "../testdata/Mark.Twain-Tom.Sawyer.txt", + // Random bytes + random: "../testdata/sharnd.out", +} + +func benchmarkDecode(b *testing.B, testfile, level, n int) { + b.ReportAllocs() + b.StopTimer() + b.SetBytes(int64(n)) + buf0, err := ioutil.ReadFile(testfiles[testfile]) + if err != nil { + b.Fatal(err) + } + if len(buf0) == 0 { + b.Fatalf("test file %q has no data", testfiles[testfile]) + } + compressed := new(bytes.Buffer) + w, err := NewWriter(compressed, level) + if err != nil { + b.Fatal(err) + } + for i := 0; i < n; i += len(buf0) { + if len(buf0) > n-i { + buf0 = buf0[:n-i] + } + io.Copy(w, bytes.NewReader(buf0)) + } + w.Close() + buf1 := compressed.Bytes() + buf0, compressed, w = nil, nil, nil + runtime.GC() + b.StartTimer() + r := NewReader(bytes.NewReader(buf1)) + res := r.(Resetter) + for i := 0; i < b.N; i++ { + res.Reset(bytes.NewReader(buf1), nil) + io.Copy(ioutil.Discard, r) + } +} + +// These short names are so that gofmt doesn't break the BenchmarkXxx function +// bodies below over multiple lines. 
+const ( + constant = ConstantCompression + speed = BestSpeed + default_ = DefaultCompression + compress = BestCompression +) + +func BenchmarkDecodeDigitsSpeed1e4(b *testing.B) { benchmarkDecode(b, digits, speed, 1e4) } +func BenchmarkDecodeDigitsSpeed1e5(b *testing.B) { benchmarkDecode(b, digits, speed, 1e5) } +func BenchmarkDecodeDigitsSpeed1e6(b *testing.B) { benchmarkDecode(b, digits, speed, 1e6) } +func BenchmarkDecodeDigitsDefault1e4(b *testing.B) { benchmarkDecode(b, digits, default_, 1e4) } +func BenchmarkDecodeDigitsDefault1e5(b *testing.B) { benchmarkDecode(b, digits, default_, 1e5) } +func BenchmarkDecodeDigitsDefault1e6(b *testing.B) { benchmarkDecode(b, digits, default_, 1e6) } +func BenchmarkDecodeDigitsCompress1e4(b *testing.B) { benchmarkDecode(b, digits, compress, 1e4) } +func BenchmarkDecodeDigitsCompress1e5(b *testing.B) { benchmarkDecode(b, digits, compress, 1e5) } +func BenchmarkDecodeDigitsCompress1e6(b *testing.B) { benchmarkDecode(b, digits, compress, 1e6) } +func BenchmarkDecodeTwainSpeed1e4(b *testing.B) { benchmarkDecode(b, twain, speed, 1e4) } +func BenchmarkDecodeTwainSpeed1e5(b *testing.B) { benchmarkDecode(b, twain, speed, 1e5) } +func BenchmarkDecodeTwainSpeed1e6(b *testing.B) { benchmarkDecode(b, twain, speed, 1e6) } +func BenchmarkDecodeTwainDefault1e4(b *testing.B) { benchmarkDecode(b, twain, default_, 1e4) } +func BenchmarkDecodeTwainDefault1e5(b *testing.B) { benchmarkDecode(b, twain, default_, 1e5) } +func BenchmarkDecodeTwainDefault1e6(b *testing.B) { benchmarkDecode(b, twain, default_, 1e6) } +func BenchmarkDecodeTwainCompress1e4(b *testing.B) { benchmarkDecode(b, twain, compress, 1e4) } +func BenchmarkDecodeTwainCompress1e5(b *testing.B) { benchmarkDecode(b, twain, compress, 1e5) } +func BenchmarkDecodeTwainCompress1e6(b *testing.B) { benchmarkDecode(b, twain, compress, 1e6) } +func BenchmarkDecodeRandomSpeed1e4(b *testing.B) { benchmarkDecode(b, random, speed, 1e4) } +func BenchmarkDecodeRandomSpeed1e5(b *testing.B) { benchmarkDecode(b, random, speed, 1e5) } +func BenchmarkDecodeRandomSpeed1e6(b *testing.B) { benchmarkDecode(b, random, speed, 1e6) } diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go new file mode 100644 index 00000000000..c1a02720d1a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reverse_bits.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +var reverseByte = [256]byte{ + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +} + +func reverseUint16(v uint16) uint16 { + return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 +} + +func reverseBits(number uint16, bitLength byte) uint16 { + return reverseUint16(number << uint8(16-bitLength)) +} diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go new file mode 100644 index 00000000000..d853320a758 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/snappy.go @@ -0,0 +1,900 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + ol := int(dst.n) + for i, v := range lit { + dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + } + dst.n += uint16(len(lit)) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+func emitCopy(dst *tokens, offset, length int) { + dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) + dst.n++ +} + +type snappyEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newSnappy(level int) snappyEnc { + switch level { + case 1: + return &snappyL1{} + case 2: + return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 3: + return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 4: + return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 14 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset +) + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func hash(u uint32) uint32 { + return (u * 0x1e35a7bd) >> tableShift +} + +// snappyL1 encapsulates level 1 compression +type snappyL1 struct{} + +func (e *snappyL1) Reset() {} + +func (e *snappyL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 16 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Initialize the hash table. + // + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535. + var table [tableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s)) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS)) + if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of Snappy's: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + s1 := base + maxMatchLength + if s1 > len(src) { + s1 = len(src) + } + a := src[s:s1] + b := src[candidate+4:] + b = b[:len(a)] + l := len(a) + for i := range a { + if a[i] != b[i] { + l = i + break + } + } + s += l + + // matchToken is flate's equivalent of Snappy's emitCopy. + dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)) + dst.n++ + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x >> 0)) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x >> 8)) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x >> 16)) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + emitLiteral(dst, src[nextEmit:]) + } +} + +type tableEntry struct { + val uint32 + offset int32 +} + +func load3232(b []byte, i int32) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyGen struct { + prev []byte + cur int32 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyL2 struct { + snappyGen + table [tableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *snappyL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxStoreBlockSize + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash(now) + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || cv != candidate.val { + // Out of range or not matched. + cv = now + continue + } + break + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-1) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)} + x >>= 8 + currHash := hash(uint32(x)) + candidate = e.table[currHash&tableMask] + e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != candidate.val { + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// snappyL3 +type snappyL3 struct { + snappyGen + table [tableSize]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *snappyL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. 
At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +// snappyL4 +type snappyL4 struct { + snappyL3 +} + +// Encode uses a similar algorithm to level 3, +// but will check up to two candidates if first isn't long enough. +func (e *snappyL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 3 + minNonLiteralBlockSize = 1 + 1 + inputMargin + matchLenGood = 12 + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. 
+ // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + var candidateAlt tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset < maxMatchOffset { + candidateAlt = candidates.Prev + } + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + // Try alternative candidate if match length < matchLenGood. + if l < matchLenGood-4 && candidateAlt.offset != 0 { + t2 := candidateAlt.offset - e.cur + 4 + l2 := e.matchlen(s, t2, src) + if l2 > l { + l = l2 + t = t2 + } + } + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + candidateAlt = tableEntry{} + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset <= maxMatchOffset { + candidateAlt = candidates.Prev + } + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +func (e *snappyGen) matchlen(s, t int32, src []byte) int32 { + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // If we are inside the current block + if t >= 0 { + b := src[t:] + a := src[s:s1] + b = b[:len(a)] + // Extend the match to be as long as possible. + for i := range a { + if a[i] != b[i] { + return int32(i) + } + } + return int32(len(a)) + } + + // We found a match in the previous block. + tp := int32(len(e.prev)) + t + if tp < 0 { + return 0 + } + + // Extend the match to be as long as possible. + a := src[s:s1] + b := e.prev[tp:] + if len(b) > len(a) { + b = b[:len(a)] + } + a = a[:len(b)] + for i := range b { + if a[i] != b[i] { + return int32(i) + } + } + + // If we reached our limit, we matched everything we are + // allowed to in the previous block and we return. + n := int32(len(b)) + if int(s+n) == s1 { + return n + } + + // Continue looking for more matches in the current block. + a = src[s+n : s1] + b = src[:len(a)] + for i := range a { + if a[i] != b[i] { + return int32(i) + n + } + } + return int32(len(a)) + n +} + +// Reset the encoding table. 
+func (e *snappyGen) Reset() { + e.prev = e.prev[:0] + e.cur += maxMatchOffset +} diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect new file mode 100644 index 00000000000..c08165143f2 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput new file mode 100644 index 00000000000..c08165143f2 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden new file mode 100644 index 00000000000..db422ca3983 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in new file mode 100644 index 00000000000..5dfddf075be Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect new file mode 100644 index 00000000000..c08165143f2 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput new file mode 100644 index 00000000000..c08165143f2 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect new file mode 100644 index 00000000000..e4396ac6fe5 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput new file mode 100644 index 00000000000..e4396ac6fe5 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden new file mode 100644 index 00000000000..23d8f7f98b5 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in new file mode 100644 index 00000000000..efaed43431a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in @@ -0,0 +1 @@ 
+3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622948954930381964428810975665933446128475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920962829254091715364367892590360011330530548820466521384146951941511609433057270365759591953092186117381932611793105118548074462379962749567351885752724891227938183011949129833673362440656643086021394946395224737190702179860943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901224953430146549585371050792279689258923542019956112129021960864034418159813629774771309960518707211349999998372978049951059731732816096318595024459455346908302642522308253344685035261931188171010003137838752886587533208381420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909216420198938095257201065485863278865936153381827968230301952035301852968995773622599413891249721775283479131515574857242454150695950829533116861727855889075098381754637464939319255060400927701671139009848824012858361603563707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104752162056966024058038150193511253382430035587640247496473263914199272604269922796782354781636009341721641219924586315030286182974555706749838505494588586926995690927210797509302955321165344987202755960236480665499119881834797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548161361157352552133475741849468438523323907394143334547762416862518983569485562099219222184272550254256887671790494601653466804988627232791786085784383827967976681454100953883786360950680064225125205117392984896084128488626945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645995813390478027590099465764078951269468398352595709825822620522489407726719478268482601476990902640136394437455305068203496252451749399651431429809190659250937221696461515709858387410597885959772975498930161753928468138268683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244136549762780797715691435997700129616089441694868555848406353422072225828488648158456028506016842739452267467678895252138522549954666727823986456596116354886230577456498035593634568174324112515076069479451096596094025228879710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821682998948722658804857564014270477555132379641451523746234364542858444795265867821051141354735739523113427166102135969536231442952484937187110145765403590279934403742007310578539062198387447808478489683321445713868751943506430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675142691239748940907186494231961567945208095146550225231603881930142093762137855956638937787083039069792077346722182562599661501421503068038447734549202605414665925201497442850732518666002132434088190710486331734649651453905796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007230558763176359421873125147120532928191826186125867321579198414848829164470609575270695722091756711672291098169091528017350671274858322287183520935396572512108357915136988209144421006751033467110314126711136990865851639831501970165151168517143765761835
155650884909989859982387345528331635507647918535893226185489632132933089857064204675259070915481416549859461637180 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect new file mode 100644 index 00000000000..e4396ac6fe5 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput new file mode 100644 index 00000000000..e4396ac6fe5 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect new file mode 100644 index 00000000000..09dc798ee37 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput new file mode 100644 index 00000000000..0c24742fde2 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden new file mode 100644 index 00000000000..09dc798ee37 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in new file mode 100644 index 00000000000..ce038ebb5bd Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect new file mode 100644 index 00000000000..09dc798ee37 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput new file mode 100644 index 00000000000..0c24742fde2 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect new file mode 100644 index 00000000000..2d6527934e9 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput new file mode 100644 index 00000000000..2d6527934e9 Binary files /dev/null and 
b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden new file mode 100644 index 00000000000..57e59322e98 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in new file mode 100644 index 00000000000..fb5b1be6198 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in @@ -0,0 +1,4 @@ +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +vH +% ɷ}>lsmIGH1Y4[ 0ˆ[|]o# +-#ulpfٱnYԀYwC8ɯ02 F=gnrN!O{k*w(b kQC9/lu>5C.u diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect new file mode 100644 index 00000000000..881e59c9ab9 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput new file mode 100644 index 00000000000..881e59c9ab9 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden new file mode 100644 index 00000000000..47d53c89c07 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in new file mode 100644 index 00000000000..8418633d2ac Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect new file mode 100644 index 00000000000..7812c1c62da Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput new file mode 100644 index 00000000000..7812c1c62da Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden new file mode 100644 index 00000000000..f5133778e1c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in new file mode 100644 index 00000000000..7c7a50d1583 --- /dev/null +++ 
b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in @@ -0,0 +1,2 @@ +1010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010
10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010 +232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect new file mode 100644 index 00000000000..7812c1c62da Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput 
b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput new file mode 100644 index 00000000000..7812c1c62da Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect new file mode 100644 index 00000000000..71ce3aeb75a Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput new file mode 100644 index 00000000000..71ce3aeb75a Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden new file mode 100644 index 00000000000..ff023114bbc Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in new file mode 100644 index 00000000000..cc5c3ad69d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in @@ -0,0 +1,14 @@ +//Copyright2009ThGoAuthor.Allrightrrvd. +//UofthiourccodigovrndbyBSD-tyl +//licnthtcnbfoundinthLICENSEfil. + +pckgmin + +import"o" + +funcmin(){ + vrb=mk([]byt,65535) + f,_:=o.Crt("huffmn-null-mx.in") + f.Writ(b) +} +ABCDEFGHIJKLMNOPQRSTUVXxyz!"#¤%&/?" 
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect new file mode 100644 index 00000000000..71ce3aeb75a Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput new file mode 100644 index 00000000000..71ce3aeb75a Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect new file mode 100644 index 00000000000..d448727c323 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput new file mode 100644 index 00000000000..d448727c323 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden new file mode 100644 index 00000000000..6d34c61fe0d --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden @@ -0,0 +1,3 @@ +AK0xßZLPa!xADI&#IEp]LƿFp 188h$5S- F66!)v.0Y& SN|d2: +t|둍xz9骺Ɏ3 +&&=ôUD=Fu]qUL+>FQYLZofTߵEŴ{Yʶbe \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in new file mode 100644 index 00000000000..73398b98b54 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "os" + +func main() { + var b = make([]byte, 65535) + f, _ := os.Create("huffman-null-max.in") + f.Write(b) +} diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect new file mode 100644 index 00000000000..d448727c323 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput new file mode 100644 index 00000000000..d448727c323 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect new file mode 100644 index 00000000000..830348a79ad Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput new file mode 100644 index 00000000000..830348a79ad Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden new file mode 100644 index 00000000000..5abdbaff9a6 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in new file mode 100644 index 00000000000..349be0e6ec6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in @@ -0,0 +1 @@ +00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect new file mode 100644 index 00000000000..dbe401c54c4 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput new file mode 100644 index 00000000000..dbe401c54c4 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput 
b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput new file mode 100644 index 00000000000..8b92d9fc20f Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput new file mode 100644 index 00000000000..8b92d9fc20f Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go new file mode 100644 index 00000000000..4f275ea61df --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,115 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import "fmt" + +const ( + // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused + // 8 bits: xlength = length - MIN_MATCH_LENGTH + // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + lengthShift = 22 + offsetMask = 1< pair into a match token. +func matchToken(xlength uint32, xoffset uint32) token { + return token(matchType + xlength< maxMatchLength || xoffset > maxMatchOffset { + panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset)) + return token(matchType) + } + return token(matchType + xlength<> lengthShift) } + +func lengthCode(len uint32) uint32 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[off>>7] + 14 + } else { + return offsetCodes[off>>14] + 28 + } +} diff --git a/vendor/github.com/klauspost/compress/flate/writer_test.go b/vendor/github.com/klauspost/compress/flate/writer_test.go new file mode 100644 index 00000000000..024512afb21 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/writer_test.go @@ -0,0 +1,258 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
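The token.go hunk earlier in this diff packs each literal or match into a 32-bit token: 2 bits of type, 8 bits of extra length and 22 bits of offset (or the literal byte). A minimal sketch of that packing follows; the constant values are assumed from the upstream Go flate code this vendored file derives from, so treat them as assumptions rather than the exact vendored source.

type token uint32

// Assumed layout, following the comment in token.go above:
//   bits 30-31: type (literal or match)
//   bits 22-29: xlength = length - MIN_MATCH_LENGTH
//   bits  0-21: xoffset = offset - MIN_OFFSET_SIZE, or the literal byte
const (
    lengthShift = 22
    offsetMask  = 1<<lengthShift - 1
    literalType = 0 << 30
    matchType   = 3 << 30
)

// literalToken tags a literal byte.
func literalToken(lit uint32) token { return token(literalType + lit) }

// matchToken packs a <length, offset> pair into a match token.
func matchToken(xlength, xoffset uint32) token {
    return token(matchType + xlength<<lengthShift + xoffset)
}

// Accessors reverse the packing.
func (t token) offset() uint32 { return uint32(t) & offsetMask }
func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }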
+ +package flate + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "runtime" + "testing" +) + +func benchmarkEncoder(b *testing.B, testfile, level, n int) { + b.StopTimer() + b.SetBytes(int64(n)) + buf0, err := ioutil.ReadFile(testfiles[testfile]) + if err != nil { + b.Fatal(err) + } + if len(buf0) == 0 { + b.Fatalf("test file %q has no data", testfiles[testfile]) + } + buf1 := make([]byte, n) + for i := 0; i < n; i += len(buf0) { + if len(buf0) > n-i { + buf0 = buf0[:n-i] + } + copy(buf1[i:], buf0) + } + buf0 = nil + runtime.GC() + w, err := NewWriter(ioutil.Discard, level) + b.StartTimer() + for i := 0; i < b.N; i++ { + w.Reset(ioutil.Discard) + _, err = w.Write(buf1) + if err != nil { + b.Fatal(err) + } + err = w.Close() + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeDigitsConstant1e4(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e4) } +func BenchmarkEncodeDigitsConstant1e5(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e5) } +func BenchmarkEncodeDigitsConstant1e6(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e6) } +func BenchmarkEncodeDigitsSpeed1e4(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e4) } +func BenchmarkEncodeDigitsSpeed1e5(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e5) } +func BenchmarkEncodeDigitsSpeed1e6(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e6) } +func BenchmarkEncodeDigitsDefault1e4(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e4) } +func BenchmarkEncodeDigitsDefault1e5(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e5) } +func BenchmarkEncodeDigitsDefault1e6(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e6) } +func BenchmarkEncodeDigitsCompress1e4(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e4) } +func BenchmarkEncodeDigitsCompress1e5(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e5) } +func BenchmarkEncodeDigitsCompress1e6(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e6) } +func BenchmarkEncodeTwainConstant1e4(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e4) } +func BenchmarkEncodeTwainConstant1e5(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e5) } +func BenchmarkEncodeTwainConstant1e6(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e6) } +func BenchmarkEncodeTwainSpeed1e4(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e4) } +func BenchmarkEncodeTwainSpeed1e5(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e5) } +func BenchmarkEncodeTwainSpeed1e6(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e6) } +func BenchmarkEncodeTwainDefault1e4(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e4) } +func BenchmarkEncodeTwainDefault1e5(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e5) } +func BenchmarkEncodeTwainDefault1e6(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e6) } +func BenchmarkEncodeTwainCompress1e4(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e4) } +func BenchmarkEncodeTwainCompress1e5(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e5) } +func BenchmarkEncodeTwainCompress1e6(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e6) } + +// A writer that fails after N writes. +type errorWriter struct { + N int +} + +func (e *errorWriter) Write(b []byte) (int, error) { + if e.N <= 0 { + return 0, io.ErrClosedPipe + } + e.N-- + return len(b), nil +} + +// Test if errors from the underlying writer is passed upwards. 
+func TestWriteError(t *testing.T) { + buf := new(bytes.Buffer) + n := 65536 + if !testing.Short() { + n *= 4 + } + for i := 0; i < n; i++ { + fmt.Fprintf(buf, "asdasfasf%d%dfghfgujyut%dyutyu\n", i, i, i) + } + in := buf.Bytes() + // We create our own buffer to control number of writes. + copyBuf := make([]byte, 128) + for l := 0; l < 10; l++ { + for fail := 1; fail <= 256; fail *= 2 { + // Fail after 'fail' writes + ew := &errorWriter{N: fail} + w, err := NewWriter(ew, l) + if err != nil { + t.Fatalf("NewWriter: level %d: %v", l, err) + } + n, err := copyBuffer(w, bytes.NewBuffer(in), copyBuf) + if err == nil { + t.Fatalf("Level %d: Expected an error, writer was %#v", l, ew) + } + n2, err := w.Write([]byte{1, 2, 2, 3, 4, 5}) + if n2 != 0 { + t.Fatal("Level", l, "Expected 0 length write, got", n) + } + if err == nil { + t.Fatal("Level", l, "Expected an error") + } + err = w.Flush() + if err == nil { + t.Fatal("Level", l, "Expected an error on flush") + } + err = w.Close() + if err == nil { + t.Fatal("Level", l, "Expected an error on close") + } + + w.Reset(ioutil.Discard) + n2, err = w.Write([]byte{1, 2, 3, 4, 5, 6}) + if err != nil { + t.Fatal("Level", l, "Got unexpected error after reset:", err) + } + if n2 == 0 { + t.Fatal("Level", l, "Got 0 length write, expected > 0") + } + if testing.Short() { + return + } + } + } +} + +func TestDeterministicL1(t *testing.T) { testDeterministic(1, t) } +func TestDeterministicL2(t *testing.T) { testDeterministic(2, t) } +func TestDeterministicL3(t *testing.T) { testDeterministic(3, t) } +func TestDeterministicL4(t *testing.T) { testDeterministic(4, t) } +func TestDeterministicL5(t *testing.T) { testDeterministic(5, t) } +func TestDeterministicL6(t *testing.T) { testDeterministic(6, t) } +func TestDeterministicL7(t *testing.T) { testDeterministic(7, t) } +func TestDeterministicL8(t *testing.T) { testDeterministic(8, t) } +func TestDeterministicL9(t *testing.T) { testDeterministic(9, t) } +func TestDeterministicL0(t *testing.T) { testDeterministic(0, t) } +func TestDeterministicLM2(t *testing.T) { testDeterministic(-2, t) } + +func testDeterministic(i int, t *testing.T) { + // Test so much we cross a good number of block boundaries. + var length = maxStoreBlockSize*30 + 500 + if testing.Short() { + length /= 10 + } + + // Create a random, but compressible stream. + rng := rand.New(rand.NewSource(1)) + t1 := make([]byte, length) + for i := range t1 { + t1[i] = byte(rng.Int63() & 7) + } + + // Do our first encode. + var b1 bytes.Buffer + br := bytes.NewBuffer(t1) + w, err := NewWriter(&b1, i) + if err != nil { + t.Fatal(err) + } + // Use a very small prime sized buffer. + cbuf := make([]byte, 787) + _, err = copyBuffer(w, br, cbuf) + if err != nil { + t.Fatal(err) + } + w.Close() + + // We choose a different buffer size, + // bigger than a maximum block, and also a prime. + var b2 bytes.Buffer + cbuf = make([]byte, 81761) + br2 := bytes.NewBuffer(t1) + w2, err := NewWriter(&b2, i) + if err != nil { + t.Fatal(err) + } + _, err = copyBuffer(w2, br2, cbuf) + if err != nil { + t.Fatal(err) + } + w2.Close() + + b1b := b1.Bytes() + b2b := b2.Bytes() + + if !bytes.Equal(b1b, b2b) { + t.Errorf("level %d did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b)) + } + + // Test using io.WriterTo interface. 
+ var b3 bytes.Buffer + br = bytes.NewBuffer(t1) + w, err = NewWriter(&b3, i) + if err != nil { + t.Fatal(err) + } + _, err = br.WriteTo(w) + if err != nil { + t.Fatal(err) + } + w.Close() + + b3b := b3.Bytes() + if !bytes.Equal(b1b, b3b) { + t.Errorf("level %d (io.WriterTo) did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b3b)) + } +} + +// copyBuffer is a copy of io.CopyBuffer, since we want to support older go versions. +// This is modified to never use io.WriterTo or io.ReaderFrom interfaces. +func copyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + if buf == nil { + buf = make([]byte, 32*1024) + } + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 00000000000..ea7324da671 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. 
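The fse README above documents the block-level Compress/Decompress entry points, the sentinel errors and the shared Scratch/Out buffer. A hedged usage sketch of that flow; the roundTrip helper and its fallback handling are illustrative, not part of the package.

package main

import (
    "fmt"

    "github.com/klauspost/compress/fse"
)

// roundTrip compresses and then decompresses one independent block,
// re-using a single Scratch for both directions to avoid allocations.
func roundTrip(in []byte) error {
    var s fse.Scratch

    comp, err := fse.Compress(in, &s)
    switch err {
    case nil:
        // Compressed output returned.
    case fse.ErrIncompressible, fse.ErrUseRLE:
        // Expected outcomes; the caller stores the block raw (or run-length encoded).
        return nil
    default:
        return err
    }

    // comp aliases s.Out; detach it before the Scratch re-uses the buffer.
    s.Out = nil

    dec, err := fse.Decompress(comp, &s)
    if err != nil {
        return err
    }
    fmt.Printf("block: %d -> %d -> %d bytes\n", len(in), len(comp), len(dec))
    return nil
}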
+ +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000000..b9db204f59d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,107 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + b.fill() + b.fill() + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. 
n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // Do single re-slice to avoid bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000000..43e463611b1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. 
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000000..f228a46cdf6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,56 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. 
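The bitWriter above appends least-significant bits first and close() terminates the stream with a marker bit, while the bitReader earlier in this diff consumes the stream back-to-front starting from that marker. A hypothetical in-package test sketching that contract; the values are arbitrary, and the key point is that the last value written is the first one read back.

func TestBitStreamRoundTrip(t *testing.T) {
    var bw bitWriter
    bw.reset(nil)
    bw.addBits16NC(0x5, 3) // written first
    bw.addBits16NC(0x2, 2) // written last
    if err := bw.close(); err != nil { // appends the end-of-stream marker and aligns
        t.Fatal(err)
    }

    var br bitReader
    if err := br.init(bw.out); err != nil {
        t.Fatal(err)
    }
    if got := br.getBits(2); got != 0x2 { // last written, first read
        t.Fatalf("getBits(2) = %#x, want 0x2", got)
    }
    if got := br.getBits(3); got != 0x5 {
        t.Fatalf("getBits(3) = %#x, want 0x5", got)
    }
    if err := br.close(); err != nil {
        t.Fatal(err)
    }
}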
+// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off : b.off+4 : b.off+4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off : b.off+4 : b.off+4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000000..398e974f654 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,684 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) >= 2<<30 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. 
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. 
+// This is read back by readNCount. +func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if uint16(charnum) > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < int(tableSize) { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < int(s.symbolLen) { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
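A small worked example of normalizeCount above, with numbers chosen only for illustration: given 100 input bytes, actualTableLog = 5 (table size 32) and raw counts a = 60, b = 39, c = 1, the low threshold is 100 >> 5 = 3, so c is assigned norm = -1 and occupies a single table cell as a low-probability symbol. The remaining counts are scaled by 32/100 and truncated: a -> 19, b -> 12. Together with c's cell that already sums to 32 = 1 << tableLog, so stillToDistribute ends at 0 and nothing extra is added to the largest symbol; when truncation leaves a surplus or deficit instead, it is absorbed by the most probable symbol, or normalizeCount2 takes over if that correction would be too large.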
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (iend - 4 - b.off)) + b.off = iend - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < int(tableSize) { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + off = 0 + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) 
+ + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = uint16(in.getBits(tableLog)) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000000..490e6806a3d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,143 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. 
+ ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = 2 << 30 + } + + return s, nil +} + +// tableStep returns the next table index. 
+func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/fse/fse_test.go b/vendor/github.com/klauspost/compress/fse/fse_test.go new file mode 100644 index 00000000000..5407d26668f --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse_test.go @@ -0,0 +1,310 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +type inputFn func() ([]byte, error) + +var testfiles = []struct { + name string + fn inputFn + err error +}{ + // gettysburg.txt is a small plain text. + {name: "gettysburg", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/gettysburg.txt") }}, + // Digits is the digits of the irrational number e. Its decimal representation + // does not repeat, but there are only 10 possible digits, so it should be + // reasonably compressible. + {name: "digits", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/e.txt") }}, + // Twain is Project Gutenberg's edition of Mark Twain's classic English novel. + {name: "twain", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/Mark.Twain-Tom.Sawyer.txt") }}, + // Random bytes + {name: "random", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/sharnd.out") }, err: ErrIncompressible}, + // Low entropy + {name: "low-ent", fn: func() ([]byte, error) { return []byte(strings.Repeat("1221", 10000)), nil }}, + // Super Low entropy + {name: "superlow-ent", fn: func() ([]byte, error) { return []byte(strings.Repeat("1", 10000) + strings.Repeat("2", 500)), nil }}, + // Zero bytes + {name: "zeroes", fn: func() ([]byte, error) { return make([]byte, 10000), nil }, err: ErrUseRLE}, + {name: "crash1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash1.bin") }, err: ErrIncompressible}, + {name: "crash2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash2.bin") }, err: ErrIncompressible}, + {name: "crash3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash3.bin") }, err: ErrIncompressible}, + {name: "endzerobits", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/endzerobits.bin") }, err: nil}, + {name: "endnonzero", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/endnonzero.bin") }, err: ErrIncompressible}, + {name: "case1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case1.bin") }, err: ErrIncompressible}, + {name: "case2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case2.bin") }, err: ErrIncompressible}, + {name: "case3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case3.bin") }, err: ErrIncompressible}, + {name: "pngdata.001", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/pngdata.bin") }, err: nil}, + {name: "normcount2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/normcount2.bin") }, err: nil}, +} + +var decTestfiles = []struct { + name string + fn inputFn + err string +}{ + // gettysburg.txt is a small plain text. 
+ {name: "hang1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-hang1.bin") }, err: "corruption detected (bitCount 252 > 32)"}, + {name: "hang2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-hang2.bin") }, err: "newState (0) == oldState (0) and no bits"}, + {name: "hang3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-hang3.bin") }, err: "maxSymbolValue too small"}, + {name: "symlen1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/dec-symlen1.bin") }, err: "symbolLen (257) too big"}, + {name: "crash4", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash4.bin") }, err: "symbolLen (1) too small"}, + {name: "crash5", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash5.bin") }, err: "symbolLen (1) too small"}, +} + +func TestCompress(t *testing.T) { + for _, test := range testfiles { + t.Run(test.name, func(t *testing.T) { + var s Scratch + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + b, err := Compress(buf0, &s) + if err != test.err { + t.Errorf("want error %v (%T), got %v (%T)", test.err, test.err, err, err) + } + if b == nil { + t.Log(test.name + ": not compressible") + return + } + t.Logf("%s: %d -> %d bytes (%.2f:1)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b))) + }) + } +} + +func ExampleCompress() { + // Read data + data, err := ioutil.ReadFile("../testdata/e.txt") + if err != nil { + panic(err) + } + + // Create re-usable scratch buffer. + var s Scratch + b, err := Compress(data, &s) + if err != nil { + panic(err) + } + fmt.Printf("Compress: %d -> %d bytes (%.2f:1)\n", len(data), len(b), float64(len(data))/float64(len(b))) + // OUTPUT: Compress: 100003 -> 41564 bytes (2.41:1) +} + +func TestDecompress(t *testing.T) { + for _, test := range decTestfiles { + t.Run(test.name, func(t *testing.T) { + var s Scratch + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + b, err := Decompress(buf0, &s) + if fmt.Sprint(err) != test.err { + t.Errorf("want error %q, got %q (%T)", test.err, err, err) + return + } + if err != nil { + return + } + if len(b) == 0 { + t.Error(test.name + ": no output") + return + } + t.Logf("%s: %d -> %d bytes (1:%.2f)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b))) + }) + } +} + +func ExampleDecompress() { + // Read data + data, err := ioutil.ReadFile("../testdata/e.txt") + if err != nil { + panic(err) + } + + // Create re-usable scratch buffer. + var s Scratch + b, err := Compress(data, &s) + if err != nil { + panic(err) + } + + // Since we use the output of compression, it cannot be used as output for decompression. 
+ s.Out = make([]byte, 0, len(data)) + d, err := Decompress(b, &s) + if err != nil { + panic(err) + } + fmt.Printf("Input matches: %t\n", bytes.Equal(d, data)) + // OUTPUT: Input matches: true +} + +func TestGenCorpus(t *testing.T) { + t.Skip("only for generating decompress corpus") + filepath.Walk("fuzz/compress/corpus", func(path string, info os.FileInfo, err error) error { + t.Run(path, func(t *testing.T) { + var s Scratch + buf0, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + b, err := Compress(buf0, &s) + if err != nil { + t.Skip("skipping") + return + } + t.Logf("%s: %d -> %d bytes (%.2f:1)", path, len(buf0), len(b), float64(len(buf0))/float64(len(b))) + dstP := strings.Replace(path, "compress", "decompress", 1) + ioutil.WriteFile(dstP, b, os.ModePerm) + }) + return nil + }) +} + +func BenchmarkCompress(b *testing.B) { + for _, tt := range testfiles { + test := tt + b.Run(test.name, func(b *testing.B) { + var s Scratch + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + _, err = Compress(buf0, &s) + if err != test.err { + b.Fatal("unexpected error:", err) + } + if err != nil { + b.Skip("skipping benchmark: ", err) + return + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, _ = Compress(buf0, &s) + } + }) + } +} + +func TestReadNCount(t *testing.T) { + for i := range testfiles { + var s Scratch + test := testfiles[i] + t.Run(test.name, func(t *testing.T) { + name := test.name + ": " + buf0, err := testfiles[i].fn() + if err != nil { + t.Fatal(err) + } + b, err := Compress(buf0, &s) + if err != test.err { + t.Error(err) + return + } + if err != nil { + t.Skip(name + err.Error()) + return + } + t.Logf("%s: %d -> %d bytes (%.2f:1)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b))) + //t.Logf("%v", b) + var s2 Scratch + dc, err := Decompress(b, &s2) + if err != nil { + t.Fatal(err) + } + want := s.norm[:s.symbolLen] + got := s2.norm[:s2.symbolLen] + if !cmp.Equal(want, got) { + if s.actualTableLog != s2.actualTableLog { + t.Errorf(name+"norm table, want tablelog: %d, got %d", s.actualTableLog, s2.actualTableLog) + } + if s.symbolLen != s2.symbolLen { + t.Errorf(name+"norm table, want size: %d, got %d", s.symbolLen, s2.symbolLen) + } + t.Errorf(name+"norm table, got delta: \n%s", cmp.Diff(want, got)) + return + } + for i, dec := range s2.decTable { + dd := dec.symbol + ee := s.ct.tableSymbol[i] + if dd != ee { + t.Errorf("table symbol mismatch. idx %d, enc: %v, dec:%v", i, ee, dd) + break + } + } + if dc != nil { + if len(buf0) != len(dc) { + t.Errorf(name+"decompressed, want size: %d, got %d", len(buf0), len(dc)) + if len(buf0) > len(dc) { + buf0 = buf0[:len(dc)] + } else { + dc = dc[:len(buf0)] + } + if !cmp.Equal(buf0, dc) { + t.Errorf(name+"decompressed, got delta: (in) %v != (out) %v\n", buf0, dc) + } + return + } + if !cmp.Equal(buf0, dc) { + t.Errorf(name+"decompressed, got delta: \n%s", cmp.Diff(buf0, dc)) + } + if !t.Failed() { + t.Log("... 
roundtrip ok!") + } + } + }) + } +} + +func BenchmarkDecompress(b *testing.B) { + for _, tt := range testfiles { + test := tt + b.Run(test.name, func(b *testing.B) { + var s, s2 Scratch + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + out, err := Compress(buf0, &s) + if err != test.err { + b.Fatal(err) + } + if err != nil { + b.Skip(test.name + ": " + err.Error()) + return + } + got, err := Decompress(out, &s2) + if err != nil { + b.Fatal(err) + } + if !bytes.Equal(buf0, got) { + b.Fatal("output mismatch") + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, err = Decompress(out, &s2) + if err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/001e59af3c698337b5449d856ef447fd0c0b47ff b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/001e59af3c698337b5449d856ef447fd0c0b47ff new file mode 100644 index 00000000000..7b6ae9e6072 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/001e59af3c698337b5449d856ef447fd0c0b47ff differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/00f14dd34076a71e9e54b9b88497c2a214c91d81-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/00f14dd34076a71e9e54b9b88497c2a214c91d81-3 new file mode 100644 index 00000000000..b1f7368d613 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/00f14dd34076a71e9e54b9b88497c2a214c91d81-3 @@ -0,0 +1 @@ +c|W 0^&B55548270565|n|ne \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/01a027e4ad99a383228779f2aaf1a2f4a3f3857b-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/01a027e4ad99a383228779f2aaf1a2f4a3f3857b-3 new file mode 100644 index 00000000000..85c94685aeb --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/01a027e4ad99a383228779f2aaf1a2f4a3f3857b-3 @@ -0,0 +1 @@ +||l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/028b20cc463cb40889c44f46b67d13db0c6b50a2-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/028b20cc463cb40889c44f46b67d13db0c6b50a2-2 new file mode 100644 index 00000000000..79629021552 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/028b20cc463cb40889c44f46b67d13db0c6b50a2-2 @@ -0,0 +1 @@ +|u|l888178419700125232338905334425pc⃀?n \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/02a4a6eccf33f032b9acc928be2f5377b31d0b03-11 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/02a4a6eccf33f032b9acc928be2f5377b31d0b03-11 new file mode 100644 index 00000000000..d93ae705c74 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/02a4a6eccf33f032b9acc928be2f5377b31d0b03-11 @@ -0,0 +1 @@ +c|W 70^&B55555554827056640123456789abcdefghijklmnpq828V12-0rstuvwxyz]=!(BADPREC)68@'E>103ĉs){1Kboo�l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/03648029374f39fc083b90d39df1f80ecc1fa78e-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/03648029374f39fc083b90d39df1f80ecc1fa78e-1 new file mode 100644 index 00000000000..b0b2408b26e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/03648029374f39fc083b90d39df1f80ecc1fa78e-1 @@ -0,0 +1 @@ 
+--)!)')44441402363-40e1a8Aea03B00161__1A-!_1________8)-0414'' \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/067422cb1d02d21c8fc4d86a4b00c126d3f24699-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/067422cb1d02d21c8fc4d86a4b00c126d3f24699-2 new file mode 100644 index 00000000000..014b508de5a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/067422cb1d02d21c8fc4d86a4b00c126d3f24699-2 @@ -0,0 +1 @@ +24055111512312578270211815834045410156250650722545140161410432351365152036224040CE3BdA64ABbeDddDfd9eF6C55511151231257827021181583404� 54101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABf82208f6CDcE92fbE6 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0b0a852a4680d8f3fe07ad0e9997b18186918c78-10 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0b0a852a4680d8f3fe07ad0e9997b18186918c78-10 new file mode 100644 index 00000000000..2175b8ecfbe --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0b0a852a4680d8f3fe07ad0e9997b18186918c78-10 @@ -0,0 +1 @@ +c|W 7%B5S0123456789 ~1 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0c33deff4510591c4ec94593a4d13f9d899523e4-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0c33deff4510591c4ec94593a4d13f9d899523e4-5 new file mode 100644 index 00000000000..e9d048c4178 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0c33deff4510591c4ec94593a4d13f9d899523e4-5 @@ -0,0 +1 @@ +<55555555559 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0c851d4e4c8b5055b6ce3b68089892203057b3c0-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0c851d4e4c8b5055b6ce3b68089892203057b3c0-4 new file mode 100644 index 00000000000..a3e76120332 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0c851d4e4c8b5055b6ce3b68089892203057b3c0-4 @@ -0,0 +1 @@ +|pW2zu| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0d0de3f34f3d27f32d12be358cb2dde550d72172 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0d0de3f34f3d27f32d12be358cb2dde550d72172 new file mode 100644 index 00000000000..cc4e4242c45 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0d0de3f34f3d27f32d12be358cb2dde550d72172 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0ead65857937e78706dcaf139c616ef71533b94d-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0ead65857937e78706dcaf139c616ef71533b94d-2 new file mode 100644 index 00000000000..3ea2a9f9773 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/0ead65857937e78706dcaf139c616ef71533b94d-2 @@ -0,0 +1 @@ +2555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABbeDddDfd9eF6C55511151231257827021181583404� 54101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABf82208f6CDc \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/110c8a30c16070bf2813480d9492a1a170a7d80a-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/110c8a30c16070bf2813480d9492a1a170a7d80a-4 new file mode 100644 index 
00000000000..ce6bfe640d0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/110c8a30c16070bf2813480d9492a1a170a7d80a-4 @@ -0,0 +1 @@ +ll \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/138deac60e8494cd0329602411774ea9ba629f72-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/138deac60e8494cd0329602411774ea9ba629f72-6 new file mode 100644 index 00000000000..ed848aad0a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/138deac60e8494cd0329602411774ea9ba629f72-6 @@ -0,0 +1 @@ +4651953614188823848962783813 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/19aa2a437fa2a48199cc0294469a0a942ae1483a-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/19aa2a437fa2a48199cc0294469a0a942ae1483a-4 new file mode 100644 index 00000000000..c103e687ac0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/19aa2a437fa2a48199cc0294469a0a942ae1483a-4 @@ -0,0 +1 @@ +5555555555555555555555555555555555555555555555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1b6cc66a7d78b5b1f37a7d4d7efaaeed1a6ce630-9 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1b6cc66a7d78b5b1f37a7d4d7efaaeed1a6ce630-9 new file mode 100644 index 00000000000..82fb8078e75 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1b6cc66a7d78b5b1f37a7d4d7efaaeed1a6ce630-9 @@ -0,0 +1 @@ +c|W 7%B5S0123456789abcdefghijklmnpqrstuvwxyz]=!(BADPREC)68@'E>1031Kboo�l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1bf17f213e2aee50a14d920aa1c615edab1b4d11 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1bf17f213e2aee50a14d920aa1c615edab1b4d11 new file mode 100644 index 00000000000..837483c0e61 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1bf17f213e2aee50a14d920aa1c615edab1b4d11 @@ -0,0 +1 @@ +-86840441164045277856725051e0xFA0FAF9b9dd1FFF87fb9BACa3aF62FDac8 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1cfd49b23106a3de30052a18cee3bed23d08f552-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1cfd49b23106a3de30052a18cee3bed23d08f552-7 new file mode 100644 index 00000000000..2ab3ea1b133 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1cfd49b23106a3de30052a18cee3bed23d08f552-7 @@ -0,0 +1 @@ +c|pW s%!(BADPREC)5?h睙63837019buo�lo \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1ecd000fbf02c3bd4cf7cfa7a314580789536ab0-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1ecd000fbf02c3bd4cf7cfa7a314580789536ab0-3 new file mode 100644 index 00000000000..6e3352f7151 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1ecd000fbf02c3bd4cf7cfa7a314580789536ab0-3 @@ -0,0 +1 @@ +c|pW2zu| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1f2a2c8867dce3ed56fef849e5de664c20cdae39-10 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1f2a2c8867dce3ed56fef849e5de664c20cdae39-10 new file mode 100644 index 00000000000..b64d627cea6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1f2a2c8867dce3ed56fef849e5de664c20cdae39-10 @@ -0,0 
+1 @@ +c|W 7%B4827056640123456789abcdefghijklmnpqrstuvwxyz]=!(BADPREC)68@'E>103ĉs){1Kboo�l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1f969240d093a413365023cc05432790a500046a-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1f969240d093a413365023cc05432790a500046a-1 new file mode 100644 index 00000000000..444c24fa57a Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1f969240d093a413365023cc05432790a500046a-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1fd68df0abe0dea0a5cf30153dcf48b5684a77de-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1fd68df0abe0dea0a5cf30153dcf48b5684a77de-1 new file mode 100644 index 00000000000..3cd4be1af0c --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/1fd68df0abe0dea0a5cf30153dcf48b5684a77de-1 @@ -0,0 +1 @@ +5555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2547cc736e951fa4919853c43ae890861a3b3264-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2547cc736e951fa4919853c43ae890861a3b3264-4 new file mode 100644 index 00000000000..fa37ca9bf9d Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2547cc736e951fa4919853c43ae890861a3b3264-4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/254bff7e4526a1c06684242ce8c6f32b07466192-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/254bff7e4526a1c06684242ce8c6f32b07466192-2 new file mode 100644 index 00000000000..00690bfd428 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/254bff7e4526a1c06684242ce8c6f32b07466192-2 @@ -0,0 +1 @@ +c \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/26157894111c075cd978a0a67a1bd2c6184e92d6-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/26157894111c075cd978a0a67a1bd2c6184e92d6-3 new file mode 100644 index 00000000000..18081c3100a Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/26157894111c075cd978a0a67a1bd2c6184e92d6-3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/26cc3dcead75312b7037f0c81d2a88ea04cf2c48 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/26cc3dcead75312b7037f0c81d2a88ea04cf2c48 new file mode 100644 index 00000000000..06089b9d81b Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/26cc3dcead75312b7037f0c81d2a88ea04cf2c48 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/283e76a3a5ac43d02ad685c555a6eb9a8fbe88e5-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/283e76a3a5ac43d02ad685c555a6eb9a8fbe88e5-4 new file mode 100644 index 00000000000..3b36f3100ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/283e76a3a5ac43d02ad685c555a6eb9a8fbe88e5-4 @@ -0,0 +1 @@ +c||nm flag fieldu \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/284e42ad2dbdfcfb6c7e6c7d5ad1bcc7e911f96f-10 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/284e42ad2dbdfcfb6c7e6c7d5ad1bcc7e911f96f-10 new file mode 100644 index 00000000000..c5a987ab5df --- /dev/null +++ 
b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/284e42ad2dbdfcfb6c7e6c7d5ad1bcc7e911f96f-10 @@ -0,0 +1 @@ +|W %S0123479abcdefghijklmnopqrstuvwxyzソ]=!(BADPREC)568@' \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2891dddca25448c8f1671b9779f715b45e6ebef0-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2891dddca25448c8f1671b9779f715b45e6ebef0-3 new file mode 100644 index 00000000000..4d9f423e780 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2891dddca25448c8f1671b9779f715b45e6ebef0-3 @@ -0,0 +1 @@ +|u| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2b8284f1553bb6e799466377047fff8a30e1dba0-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2b8284f1553bb6e799466377047fff8a30e1dba0-6 new file mode 100644 index 00000000000..03caa51f5ab --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2b8284f1553bb6e799466377047fff8a30e1dba0-6 @@ -0,0 +1 @@ +c|pW s')0567819432u| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2c3a94ad7435a7c8ae24df733e7c3a1044b38580-10 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2c3a94ad7435a7c8ae24df733e7c3a1044b38580-10 new file mode 100644 index 00000000000..ad723eb0cc3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/2c3a94ad7435a7c8ae24df733e7c3a1044b38580-10 @@ -0,0 +1 @@ +\W 7%B8673617379884035472059622406959533691406255S0123456789abcdefghijklmnpqrstuvwxyz]=!(BADPREC)68@'E>1031Kboo� \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/326998db244ec22e4545688efc3fcf761edcfa47 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/326998db244ec22e4545688efc3fcf761edcfa47 new file mode 100644 index 00000000000..43b96595474 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/326998db244ec22e4545688efc3fcf761edcfa47 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/32796a5dc0d50e6c16a3400af331c4a1788d2620-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/32796a5dc0d50e6c16a3400af331c4a1788d2620-4 new file mode 100644 index 00000000000..3757a051ed4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/32796a5dc0d50e6c16a3400af331c4a1788d2620-4 @@ -0,0 +1 @@ +55555555559 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/33844260640d4b3bc276376be9b5b5a1c53350cb-8 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/33844260640d4b3bc276376be9b5b5a1c53350cb-8 new file mode 100644 index 00000000000..1afa5a8b995 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/33844260640d4b3bc276376be9b5b5a1c53350cb-8 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/33d53df606943afe544548920b1b8bd3e1ecf731-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/33d53df606943afe544548920b1b8bd3e1ecf731-7 new file mode 100644 index 00000000000..185cf20bf17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/33d53df606943afe544548920b1b8bd3e1ecf731-7 @@ -0,0 +1 @@ +f|W s'p)05678too few operands for format '%19432u2 \ No newline at end of file diff --git 
a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3473974824e5391343390e25b1e250edf2ab45cf-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3473974824e5391343390e25b1e250edf2ab45cf-6 new file mode 100644 index 00000000000..128a0f0dbe4 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3473974824e5391343390e25b1e250edf2ab45cf-6 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/393fe96068641f74607dd7eebde0807de355944a-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/393fe96068641f74607dd7eebde0807de355944a-2 new file mode 100644 index 00000000000..d9aa4578941 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/393fe96068641f74607dd7eebde0807de355944a-2 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3ac84d808844f1204e0704eb2d87a083d17e5edf-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3ac84d808844f1204e0704eb2d87a083d17e5edf-4 new file mode 100644 index 00000000000..1d118e0e976 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3ac84d808844f1204e0704eb2d87a083d17e5edf-4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3b3dc5be03fd492a33bd331979fb5498f5df1e52-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3b3dc5be03fd492a33bd331979fb5498f5df1e52-1 new file mode 100644 index 00000000000..d64bff49643 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3b3dc5be03fd492a33bd331979fb5498f5df1e52-1 @@ -0,0 +1 @@ +5555555555555etProcessMemoryInfo5217012461354e46015555555555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3ca5ae6d9b3a1d465c8214e272c32a60ebc0c848-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3ca5ae6d9b3a1d465c8214e272c32a60ebc0c848-2 new file mode 100644 index 00000000000..542906e178e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3ca5ae6d9b3a1d465c8214e272c32a60ebc0c848-2 @@ -0,0 +1 @@ +55511151225 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3d648fdd4eee536b735033f7eb135b28ca5c8526 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3d648fdd4eee536b735033f7eb135b28ca5c8526 new file mode 100644 index 00000000000..ed669c05da3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3d648fdd4eee536b735033f7eb135b28ca5c8526 @@ -0,0 +1 @@ +555111512312578270211815834045410156250xD9beDddDfd9eF6C5551115123125782702118158340454101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABf82208f6CDcE92fbE6F \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3da89ee273be13437e7ecf760f3fbd4dc0e8d1fe-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3da89ee273be13437e7ecf760f3fbd4dc0e8d1fe-7 new file mode 100644 index 00000000000..20d5cb86e6d Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3da89ee273be13437e7ecf760f3fbd4dc0e8d1fe-7 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3e9b93aed9d65aab72e65351b433bce4a091ab0d b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3e9b93aed9d65aab72e65351b433bce4a091ab0d new file mode 
100644 index 00000000000..24adff12b16 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3e9b93aed9d65aab72e65351b433bce4a091ab0d differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3fc41ed16660ff04dc86763d24f7f41e7ac72e57-10 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3fc41ed16660ff04dc86763d24f7f41e7ac72e57-10 new file mode 100644 index 00000000000..57a876d20e5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/3fc41ed16660ff04dc86763d24f7f41e7ac72e57-10 @@ -0,0 +1 @@ +|W 7%B5S0123456789acdefghijklmnpqrstuvwxyz]=!(BADPREC)8@'E>1031Kboo�l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/40259a0dccc3648661894e1cb9d43ecb18c1e94f-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/40259a0dccc3648661894e1cb9d43ecb18c1e94f-4 new file mode 100644 index 00000000000..3b927e21c7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/40259a0dccc3648661894e1cb9d43ecb18c1e94f-4 @@ -0,0 +1 @@ +||346519536141888238489627838134765(25l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/440ac3e84eb89ba7c864261c36567ec92f111be5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/440ac3e84eb89ba7c864261c36567ec92f111be5 new file mode 100644 index 00000000000..7b6e0bb4941 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/440ac3e84eb89ba7c864261c36567ec92f111be5 @@ -0,0 +1 @@ +55555555555555555545555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/44a33b09be4119510e06b8cf5f695658dd0da430-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/44a33b09be4119510e06b8cf5f695658dd0da430-3 new file mode 100644 index 00000000000..2f4d368b8b6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/44a33b09be4119510e06b8cf5f695658dd0da430-3 @@ -0,0 +1 @@ +c|n|name flag fieldu \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/463d78c147df54fb78e55f52f79c9dff7a42c5c3-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/463d78c147df54fb78e55f52f79c9dff7a42c5c3-4 new file mode 100644 index 00000000000..828c8465af9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/463d78c147df54fb78e55f52f79c9dff7a42c5c3-4 @@ -0,0 +1 @@ +c|pW 2zu| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4a076c6a1ff5c3956d52a9a18c90636d9271d5dc-8 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4a076c6a1ff5c3956d52a9a18c90636d9271d5dc-8 new file mode 100644 index 00000000000..d50965b22be --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4a076c6a1ff5c3956d52a9a18c90636d9271d5dc-8 @@ -0,0 +1 @@ +c|W s%B�5S奔ソ]=!(BADPREC)568@'E>17031Kboo�l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4a0e9c56d9b4d655b8c750e01b63aaee512c79dd b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4a0e9c56d9b4d655b8c750e01b63aaee512c79dd new file mode 100644 index 00000000000..1ed2a7e2c14 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4a0e9c56d9b4d655b8c750e01b63aaee512c79dd @@ -0,0 +1 @@ +5555555555555555555555555555554045217012461354e46015555555555555555555555555555554 \ No 
newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4c2d1f5dfeb5273fe3c44edc2c9b016159d23b5c-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4c2d1f5dfeb5273fe3c44edc2c9b016159d23b5c-3 new file mode 100644 index 00000000000..b12fa85ed5e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4c2d1f5dfeb5273fe3c44edc2c9b016159d23b5c-3 @@ -0,0 +1 @@ +55511110156 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4d92faede7356a8aa5c0fd7dc7df1cbf6ec7bc54-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4d92faede7356a8aa5c0fd7dc7df1cbf6ec7bc54-4 new file mode 100644 index 00000000000..87c2df54e00 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4d92faede7356a8aa5c0fd7dc7df1cbf6ec7bc54-4 @@ -0,0 +1 @@ +5555555555555555555555555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4f480dad3c4e936864adaa7707bac06e59272d96 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4f480dad3c4e936864adaa7707bac06e59272d96 new file mode 100644 index 00000000000..1397e77b63d Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4f480dad3c4e936864adaa7707bac06e59272d96 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4f784804ea2ba40c6ff5251b1c092c382b8ff236-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4f784804ea2ba40c6ff5251b1c092c382b8ff236-3 new file mode 100644 index 00000000000..25b399cc96b Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/4f784804ea2ba40c6ff5251b1c092c382b8ff236-3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/500ce5edbda324f9a385f06a24094cee873bcf07-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/500ce5edbda324f9a385f06a24094cee873bcf07-6 new file mode 100644 index 00000000000..606a865ef29 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/500ce5edbda324f9a385f06a24094cee873bcf07-6 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5135c69c6aca6f4aad9cd65660398fc3a7bce387-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5135c69c6aca6f4aad9cd65660398fc3a7bce387-5 new file mode 100644 index 00000000000..753046ffc9f Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5135c69c6aca6f4aad9cd65660398fc3a7bce387-5 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/537d5d54bfa6fbdce3e0d7b7746097ba124ee7d8-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/537d5d54bfa6fbdce3e0d7b7746097ba124ee7d8-3 new file mode 100644 index 00000000000..ab4b400fc6e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/537d5d54bfa6fbdce3e0d7b7746097ba124ee7d8-3 @@ -0,0 +1 @@ +55555555531 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/54d1f293ba75bf6ff72e8bf18fe15b00e590e2c8-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/54d1f293ba75bf6ff72e8bf18fe15b00e590e2c8-1 new file mode 100644 index 00000000000..42c106084d5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/54d1f293ba75bf6ff72e8bf18fe15b00e590e2c8-1 @@ -0,0 +1 @@ +55511151231257827021181583404541015625 \ No newline at end of 
file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/55a0c930a804665fd0ae2aa914e39ec2898babaf-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/55a0c930a804665fd0ae2aa914e39ec2898babaf-7 new file mode 100644 index 00000000000..b3227882582 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/55a0c930a804665fd0ae2aa914e39ec2898babaf-7 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/56032dcc54f0d2fa736dd71962caa4e3e240abf0 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/56032dcc54f0d2fa736dd71962caa4e3e240abf0 new file mode 100644 index 00000000000..017691b2c98 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/56032dcc54f0d2fa736dd71962caa4e3e240abf0 @@ -0,0 +1 @@ +____I_Rc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetToke
nInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformatin5545 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/576280c2b993d7a52ac7c741273336f8bd9213f5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/576280c2b993d7a52ac7c741273336f8bd9213f5 new file mode 100644 index 00000000000..d60c7787987 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/576280c2b993d7a52ac7c741273336f8bd9213f5 @@ -0,0 +1 @@ +5555555555555cMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e46015555555555555555555555555555555555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e46015555555555555555555555555555552Sr_s17bWH0xbefDe0xdcBB4bB7967b8dbFeb5DDBB5555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo52 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/59d26deac1ea4b1ad0e2d65b20839f7d2d4a4b89 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/59d26deac1ea4b1ad0e2d65b20839f7d2d4a4b89 new file mode 100644 index 00000000000..53ae9d763ae Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/59d26deac1ea4b1ad0e2d65b20839f7d2d4a4b89 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5b4999d4bd9c1b77f6004595bc2e80bf6c7180af-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5b4999d4bd9c1b77f6004595bc2e80bf6c7180af-4 new file mode 100644 index 00000000000..47f56113239 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5b4999d4bd9c1b77f6004595bc2e80bf6c7180af-4 @@ -0,0 +1 @@ + 11368683393798828 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5bf1f7b62d3967f98c64b19a4b2c0d61b70e6222-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5bf1f7b62d3967f98c64b19a4b2c0d61b70e6222-2 new file mode 100644 index 00000000000..43c38c6c9d6 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/5bf1f7b62d3967f98c64b19a4b2c0d61b70e6222-2 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6548f73fac595761f3cf335b35d1ff524d67f09f-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6548f73fac595761f3cf335b35d1ff524d67f09f-5 new file mode 100644 index 00000000000..82c74a0bc15 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6548f73fac595761f3cf335b35d1ff524d67f09f-5 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/68d516dd85ef75af6a2cd31394eebab11b024fe4-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/68d516dd85ef75af6a2cd31394eebab11b024fe4-3 
new file mode 100644 index 00000000000..3013fd5eaf6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/68d516dd85ef75af6a2cd31394eebab11b024fe4-3 @@ -0,0 +1 @@ +5555555555555555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/69c449d595dc232b2ad88b0b48395948a23a0650-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/69c449d595dc232b2ad88b0b48395948a23a0650-2 new file mode 100644 index 00000000000..78dd032307e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/69c449d595dc232b2ad88b0b48395948a23a0650-2 @@ -0,0 +1 @@ +55511154541015625 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6afbd7493a57d48d401da68efdc79156ccc1992c-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6afbd7493a57d48d401da68efdc79156ccc1992c-1 new file mode 100644 index 00000000000..5855ad97e13 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6afbd7493a57d48d401da68efdc79156ccc1992c-1 @@ -0,0 +1 @@ +c|W %5]PREC)@55555555555555555555kenIrmati555u55555555555555555555555555555555555555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b50366cad8565e61d162e38cf031c918d305dd3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b50366cad8565e61d162e38cf031c918d305dd3 new file mode 100644 index 00000000000..aaa6794ba20 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b50366cad8565e61d162e38cf031c918d305dd3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b93512fc409ba81e69a6c818c47540e47dcb9a4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b93512fc409ba81e69a6c818c47540e47dcb9a4 new file mode 100644 index 00000000000..56b5a0f9371 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b93512fc409ba81e69a6c818c47540e47dcb9a4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b99950532c007f8f24725686d8142ddf390889b b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b99950532c007f8f24725686d8142ddf390889b new file mode 100644 index 00000000000..6b5168872e1 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6b99950532c007f8f24725686d8142ddf390889b differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6d9dd11e564c301aa0a6043ee59e00c9b0192334-8 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6d9dd11e564c301aa0a6043ee59e00c9b0192334-8 new file mode 100644 index 00000000000..80661f16bd2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6d9dd11e564c301aa0a6043ee59e00c9b0192334-8 @@ -0,0 +1 @@ +c|pW s%!(DC)5?h337S^9buos \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6e53664c27b28370ee8680e4e4dfd139b43c2850-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6e53664c27b28370ee8680e4e4dfd139b43c2850-2 new file mode 100644 index 00000000000..c69c5287770 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6e53664c27b28370ee8680e4e4dfd139b43c2850-2 @@ -0,0 +1 @@ +5555555555558777878078144567552953958511352539062555555 \ No newline at end of file diff --git 
a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6ebb78761afbe6429fd06e8f0f9394cc8e061c59-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6ebb78761afbe6429fd06e8f0f9394cc8e061c59-6 new file mode 100644 index 00000000000..9adeb455348 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6ebb78761afbe6429fd06e8f0f9394cc8e061c59-6 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6ee6aab4898dc237d1504802293ff1e36e07d7d7-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6ee6aab4898dc237d1504802293ff1e36e07d7d7-2 new file mode 100644 index 00000000000..eefd0d13c42 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6ee6aab4898dc237d1504802293ff1e36e07d7d7-2 @@ -0,0 +1 @@ +--)!)')44444-01012-0!1_1_1_1_e___I0_0__8)-04414'E' \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6eea60261259d0ed848b459866a9a7ccebf25f02-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6eea60261259d0ed848b459866a9a7ccebf25f02-4 new file mode 100644 index 00000000000..2b9cf4b9566 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/6eea60261259d0ed848b459866a9a7ccebf25f02-4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/70dcd94d449aa197672f8cabcc88a21254131687 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/70dcd94d449aa197672f8cabcc88a21254131687 new file mode 100644 index 00000000000..6fedb0846ff --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/70dcd94d449aa197672f8cabcc88a21254131687 @@ -0,0 +1 @@ +021828168289714372377277407224532284631705654362 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/72677d19506938afc885d16fd5ca7c4be75f3752 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/72677d19506938afc885d16fd5ca7c4be75f3752 new file mode 100644 index 00000000000..2e9ae103350 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/72677d19506938afc885d16fd5ca7c4be75f3752 @@ -0,0 +1 @@ 
+____I_Rc__e_555555555555555555555555555404570124614e4155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformatin5545____I_Rc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55455555555555555555555555555555540452170
5555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e46015 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/732e566ac977d41409549e42c4b6ff6b0913b3cc-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/732e566ac977d41409549e42c4b6ff6b0913b3cc-2 new file mode 100644 index 00000000000..380ce89683b --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/732e566ac977d41409549e42c4b6ff6b0913b3cc-2 @@ -0,0 +1 @@ +M55555555555555555555555555555555|n|ne \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/747d7c993787ae22ace6ec322dc1f098c938ec1f b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/747d7c993787ae22ace6ec322dc1f098c938ec1f new file mode 100644 index 00000000000..1789c733b08 Binary files /dev/null and 
b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/747d7c993787ae22ace6ec322dc1f098c938ec1f differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/75262a4c69622c9103675c86a5e6a4a3688ed3e6-8 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/75262a4c69622c9103675c86a5e6a4a3688ed3e6-8 new file mode 100644 index 00000000000..4cc1519fe8c Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/75262a4c69622c9103675c86a5e6a4a3688ed3e6-8 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/764ca341b24c6a68ad9507a31abcc33287f6c994-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/764ca341b24c6a68ad9507a31abcc33287f6c994-1 new file mode 100644 index 00000000000..219cbd8a514 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/764ca341b24c6a68ad9507a31abcc33287f6c994-1 @@ -0,0 +1 @@ +55555555555555555555555555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7b56b7ac07966b8060919498d93a47e68730d20a-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7b56b7ac07966b8060919498d93a47e68730d20a-2 new file mode 100644 index 00000000000..71ffe47266a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7b56b7ac07966b8060919498d93a47e68730d20a-2 @@ -0,0 +1 @@ +c|W %5]P5555555SetTokenInformation5545555555555578008u55555555555555555554045217012461354e46015555555555555555555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7bbcc6e3a846cb3bdfe20a0c4d34f30985ab87d3-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7bbcc6e3a846cb3bdfe20a0c4d34f30985ab87d3-1 new file mode 100644 index 00000000000..18c587f1f23 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7bbcc6e3a846cb3bdfe20a0c4d34f30985ab87d3-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7d2570a230d731b54e04320f4bce5693d5bd4176 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7d2570a230d731b54e04320f4bce5693d5bd4176 new file mode 100644 index 00000000000..8121744eb8f Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/7d2570a230d731b54e04320f4bce5693d5bd4176 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/84859b8196cb65505cda9c056cbecb4096fd4879-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/84859b8196cb65505cda9c056cbecb4096fd4879-7 new file mode 100644 index 00000000000..4c005f04c50 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/84859b8196cb65505cda9c056cbecb4096fd4879-7 @@ -0,0 +1 @@ +c|pW s%!(BADPREC)563837319boo� \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/889fdb56e3b7b79e51e66ed1c47fb0b60970d054-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/889fdb56e3b7b79e51e66ed1c47fb0b60970d054-2 new file mode 100644 index 00000000000..01e940728ed --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/889fdb56e3b7b79e51e66ed1c47fb0b60970d054-2 @@ -0,0 +1 @@ +c|u \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/8c562007589132e496325ea892733466e03a06cb b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/8c562007589132e496325ea892733466e03a06cb new file mode 
100644 index 00000000000..7b6897ed674 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/8c562007589132e496325ea892733466e03a06cb differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/8e78a58162034efce763c158087c6f1e427c1260 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/8e78a58162034efce763c158087c6f1e427c1260 new file mode 100644 index 00000000000..53c76fd27b4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/8e78a58162034efce763c158087c6f1e427c1260 @@ -0,0 +1 @@ +55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9150349979ceed8b2f6db6e0ed836462cd27d499-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9150349979ceed8b2f6db6e0ed836462cd27d499-1 new file mode 100644 index 00000000000..eb0011dcd0c --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9150349979ceed8b2f6db6e0ed836462cd27d499-1 @@ -0,0 +1 @@ +188281282812-00b \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/91a9bc8bbcab0dc05bedaf5f4aa8d383c1651fb2-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/91a9bc8bbcab0dc05bedaf5f4aa8d383c1651fb2-5 new file mode 100644 index 00000000000..74209f175fb Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/91a9bc8bbcab0dc05bedaf5f4aa8d383c1651fb2-5 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9278d66afee65127b44cdd505e55580e1faa2377-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9278d66afee65127b44cdd505e55580e1faa2377-1 new file mode 100644 index 00000000000..9e7add2cbad Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9278d66afee65127b44cdd505e55580e1faa2377-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9300f0f580a7b8b7cb3a7163a48ed0771c72fb1b b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9300f0f580a7b8b7cb3a7163a48ed0771c72fb1b new file mode 100644 index 00000000000..498e5b9a81a Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9300f0f580a7b8b7cb3a7163a48ed0771c72fb1b differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9355e3410f2f344ecd5dd798819020a35461c2df-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9355e3410f2f344ecd5dd798819020a35461c2df-2 new file mode 100644 index 00000000000..49da5385e9a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9355e3410f2f344ecd5dd798819020a35461c2df-2 @@ -0,0 +1 @@ +c|u| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9544d0384139d241f81c841cb63f4a6bf2c3d55e-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9544d0384139d241f81c841cb63f4a6bf2c3d55e-4 new file mode 100644 index 00000000000..f74237f4413 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9544d0384139d241f81c841cb63f4a6bf2c3d55e-4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9802e09660b74c259960141c89402b5fdececf3f 
b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9802e09660b74c259960141c89402b5fdececf3f new file mode 100644 index 00000000000..f205de9a076 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9802e09660b74c259960141c89402b5fdececf3f differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/98840eab4f9516cedcdcc88302b3639672a42f06-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/98840eab4f9516cedcdcc88302b3639672a42f06-2 new file mode 100644 index 00000000000..d14c1aeabf4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/98840eab4f9516cedcdcc88302b3639672a42f06-2 @@ -0,0 +1 @@ +5555555555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9a71d9b48c814eb1b0db3133482c06a079de2bc8-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9a71d9b48c814eb1b0db3133482c06a079de2bc8-4 new file mode 100644 index 00000000000..75f22e39a6b --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9a71d9b48c814eb1b0db3133482c06a079de2bc8-4 @@ -0,0 +1 @@ +8673617379884547205962240695953369140625c|n|name flag fieldu \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9a86f34bfd765bab80845da7128ab574d6abdef1-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9a86f34bfd765bab80845da7128ab574d6abdef1-5 new file mode 100644 index 00000000000..2895213ba0e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9a86f34bfd765bab80845da7128ab574d6abdef1-5 @@ -0,0 +1 @@ +i/o timeout|pW272204682680184470K[Cu| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9cd2374f626d4c55ce9389368b428a74be518319 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9cd2374f626d4c55ce9389368b428a74be518319 new file mode 100644 index 00000000000..3ff4e47b1b7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9cd2374f626d4c55ce9389368b428a74be518319 @@ -0,0 +1 @@ +____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9d52d6b15a5ee8cd7a575bac4dec1025573ad062-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9d52d6b15a5ee8cd7a575bac4dec1025573ad062-2 new file mode 100644 index 00000000000..73a4ac489f2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9d52d6b15a5ee8cd7a575bac4dec1025573ad062-2 @@ -0,0 +1 @@ +01234567u89abfdec \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9e6953ce446955eed1bee2d9819461068d2d37f6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9e6953ce446955eed1bee2d9819461068d2d37f6 new file mode 100644 index 00000000000..a1b2e0f4a01 --- /dev/null +++ 
b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/9e6953ce446955eed1bee2d9819461068d2d37f6 @@ -0,0 +1 @@ +5555555555555555555555555555555555555555555555555555555555555555554555555555555555555555555555555555555555555555555555555555555555555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a0704a231fa435579a323a222d91a03fe3b5cbc2-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a0704a231fa435579a323a222d91a03fe3b5cbc2-6 new file mode 100644 index 00000000000..c06a6c889c1 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a0704a231fa435579a323a222d91a03fe3b5cbc2-6 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a1f084b3911d33e6b41f2ba29efbf9140800444e-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a1f084b3911d33e6b41f2ba29efbf9140800444e-3 new file mode 100644 index 00000000000..1b1bb744d9a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a1f084b3911d33e6b41f2ba29efbf9140800444e-3 @@ -0,0 +1 @@ +5551115455625 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a3953bd6a594600ab51268955ecee6fea8be4709-8 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a3953bd6a594600ab51268955ecee6fea8be4709-8 new file mode 100644 index 00000000000..a6decd14647 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a3953bd6a594600ab51268955ecee6fea8be4709-8 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a61e7f9a73cdb1f8e21d1cc341429f15e1db3172-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a61e7f9a73cdb1f8e21d1cc341429f15e1db3172-1 new file mode 100644 index 00000000000..889de47153a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a61e7f9a73cdb1f8e21d1cc341429f15e1db3172-1 @@ -0,0 +1 @@ +5555555555555555555555555555554045555555555555555015555555555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a8e6fe772a578fe63546761ea1c195ff61203810-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a8e6fe772a578fe63546761ea1c195ff61203810-4 new file mode 100644 index 00000000000..9379793e673 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/a8e6fe772a578fe63546761ea1c195ff61203810-4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/aed10bc423e56534538d83702d4be2aa281549a9-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/aed10bc423e56534538d83702d4be2aa281549a9-4 new file mode 100644 index 00000000000..fda02eed17f --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/aed10bc423e56534538d83702d4be2aa281549a9-4 @@ -0,0 +1 @@ +555545555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b13d2b8e3724d1787d2105c1a4e3220e1e690339-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b13d2b8e3724d1787d2105c1a4e3220e1e690339-1 new file mode 100644 index 00000000000..290d77bfd90 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b13d2b8e3724d1787d2105c1a4e3220e1e690339-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b1dc3183c3a0c093dd7b36bb8ef2a01c20e45635-4 
b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b1dc3183c3a0c093dd7b36bb8ef2a01c20e45635-4 new file mode 100644 index 00000000000..08df4b67dc3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b1dc3183c3a0c093dd7b36bb8ef2a01c20e45635-4 @@ -0,0 +1 @@ +55555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b1f5380ef7c0a96f102e2ac2fed5e4a227a0d9f4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b1f5380ef7c0a96f102e2ac2fed5e4a227a0d9f4 new file mode 100644 index 00000000000..bef0926e420 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b1f5380ef7c0a96f102e2ac2fed5e4a227a0d9f4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b22334c4d7507820a3b901dcba18b20a74b28f22-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b22334c4d7507820a3b901dcba18b20a74b28f22-7 new file mode 100644 index 00000000000..1d3f9d707f0 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b22334c4d7507820a3b901dcba18b20a74b28f22-7 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b4b2324b290ec96791a1ad910fd62d8b6d5b32cc-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b4b2324b290ec96791a1ad910fd62d8b6d5b32cc-1 new file mode 100644 index 00000000000..578df8cf733 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b4b2324b290ec96791a1ad910fd62d8b6d5b32cc-1 @@ -0,0 +1 @@ +5555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b60c99af5868c013f98cdce9781b4fffeed81f30 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b60c99af5868c013f98cdce9781b4fffeed81f30 new file mode 100644 index 00000000000..d7ccd0bbbde --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b60c99af5868c013f98cdce9781b4fffeed81f30 @@ -0,0 +1 @@ 
+____I_Rc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformatin5545____I_Rc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55455555555555555555555555555555540
45217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformati \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b62b1d73848fde9b74a166239cef1a76506a9e41-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b62b1d73848fde9b74a166239cef1a76506a9e41-1 new file mode 100644 index 00000000000..3beb5813dfb --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b62b1d73848fde9b74a166239cef1a76506a9e41-1 @@ -0,0 +1 @@ +c|W 
0^&554827056640123456789abcdghijklmnpq828V12-0rstuvwxyz]=!(BADPREC)68@'E>103ĉs){1Kb55555555555555555404521701246134e460155555555555555555555555555SetTokenInformation5555555555555555555555555555404521701246134e460155555555555555555555555555SetTokenInformation554555555555555555555555555555555404521701246134e460155555555555555555555555555SetTokenInformation5555555555555555555555555555404521701246134e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555655555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation554555555555555555555555555555555404521701246551354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e46015555555555555555555555SetTokenInforma____I8_JRc__e_5555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation5545555555555555555555555555555554045217012461354e460155555555555555555555555555SetTokenInformation55555555555555555555555555554045217012461354e460155 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b7b180de011becb75b415d308134a35b1c3e075f-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b7b180de011becb75b415d308134a35b1c3e075f-1 new file mode 100644 index 00000000000..425f1bba546 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/b7b180de011becb75b415d308134a35b1c3e075f-1 @@ -0,0 +1 @@ +c|u|l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/bdd90d98c50056c82fa01cdee2d91ef30112c4d7-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/bdd90d98c50056c82fa01cdee2d91ef30112c4d7-1 new file mode 100644 index 00000000000..ae33c1e2fd4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/bdd90d98c50056c82fa01cdee2d91ef30112c4d7-1 @@ -0,0 +1 @@ +M5555555555555555555555555555555|n|ne \ No newline at end of file diff --git 
a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c0a4a69dff77a11d744507864de0a15d000afd3f-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c0a4a69dff77a11d744507864de0a15d000afd3f-5 new file mode 100644 index 00000000000..6c9c10f29d4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c0a4a69dff77a11d744507864de0a15d000afd3f-5 @@ -0,0 +1 @@ +||34651953614188823848962783813 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c372591e0c943653714cdc9b1e52f6b0a49a0ab6-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c372591e0c943653714cdc9b1e52f6b0a49a0ab6-3 new file mode 100644 index 00000000000..07072e39af3 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c372591e0c943653714cdc9b1e52f6b0a49a0ab6-3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c3d814235d50a4f96ced41bf5fc9c53c4e408765 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c3d814235d50a4f96ced41bf5fc9c53c4e408765 new file mode 100644 index 00000000000..4b12f9d3272 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c3d814235d50a4f96ced41bf5fc9c53c4e408765 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c43bfe87577eecc822788ffeb9dadf2cd7b8d2f0 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c43bfe87577eecc822788ffeb9dadf2cd7b8d2f0 new file mode 100644 index 00000000000..72996c8c25f Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c43bfe87577eecc822788ffeb9dadf2cd7b8d2f0 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c7198f2cc1dd9bffea474e6eb4b189ecdf2f965d b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c7198f2cc1dd9bffea474e6eb4b189ecdf2f965d new file mode 100644 index 00000000000..9e0c0cec3b3 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c7198f2cc1dd9bffea474e6eb4b189ecdf2f965d differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c775292f392c027f7d24e5a1c84b1dd176c03f32 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c775292f392c027f7d24e5a1c84b1dd176c03f32 new file mode 100644 index 00000000000..f7217c9db48 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/c775292f392c027f7d24e5a1c84b1dd176c03f32 @@ -0,0 +1 @@ +01555554555555555555555555555555555554045555555555555555015555555555555555555555555555554555555555555555555555555540455555555555555550155555555555555555555555555555455555555555555555555555555555540455555555555555550155555555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/ca269332cbd02cb17415e5cfc4ef7403875ef755-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/ca269332cbd02cb17415e5cfc4ef7403875ef755-5 new file mode 100644 index 00000000000..d8ea44d24ff --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/ca269332cbd02cb17415e5cfc4ef7403875ef755-5 @@ -0,0 +1 @@ +c|أB551X \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/cedd771f15bd51b21b91d2535588e6bfd943456e b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/cedd771f15bd51b21b91d2535588e6bfd943456e new file mode 100644 index 00000000000..fef1b680bd1 Binary files /dev/null 
and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/cedd771f15bd51b21b91d2535588e6bfd943456e differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d22b7d7f10d50e7cd68cfb65ac60ce2af5e05f0a-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d22b7d7f10d50e7cd68cfb65ac60ce2af5e05f0a-5 new file mode 100644 index 00000000000..d93676d56d8 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d22b7d7f10d50e7cd68cfb65ac60ce2af5e05f0a-5 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d24d432b5104847324add3b047f71cfbb2cf11a2-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d24d432b5104847324add3b047f71cfbb2cf11a2-5 new file mode 100644 index 00000000000..ddca4f92ff0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d24d432b5104847324add3b047f71cfbb2cf11a2-5 @@ -0,0 +1 @@ +f|p GtCorNamA+eExW-809244u \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d2a3b921d428c6ef1dc976623d0fd3914b8b5924-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d2a3b921d428c6ef1dc976623d0fd3914b8b5924-7 new file mode 100644 index 00000000000..ba5d70a315c --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d2a3b921d428c6ef1dc976623d0fd3914b8b5924-7 @@ -0,0 +1 @@ +c|pW s%!(BADPREC)568170319boo�lu \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d5c5747ded386a1de2be97f8fa56f27dbf866331-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d5c5747ded386a1de2be97f8fa56f27dbf866331-3 new file mode 100644 index 00000000000..08e5ae33491 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d5c5747ded386a1de2be97f8fa56f27dbf866331-3 @@ -0,0 +1 @@ +yoverflow on character value \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d68e96f1fdefcc1ca114970b7d9ef1cbd5a0cc50-10 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d68e96f1fdefcc1ca114970b7d9ef1cbd5a0cc50-10 new file mode 100644 index 00000000000..044fc1350d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d68e96f1fdefcc1ca114970b7d9ef1cbd5a0cc50-10 @@ -0,0 +1 @@ +|W 7%B5S012349acdefghijklmnpqrstuvwxyz]=!(BADPREC)8@'E>1031Kboo�l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d9af7dc7888daf056627088aef8cfadd5817660c-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d9af7dc7888daf056627088aef8cfadd5817660c-6 new file mode 100644 index 00000000000..421673d6534 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/d9af7dc7888daf056627088aef8cfadd5817660c-6 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/dbada9d56b10891ca136d80794bb93ad9e58293a-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/dbada9d56b10891ca136d80794bb93ad9e58293a-7 new file mode 100644 index 00000000000..2a7c944e26e Binary files /dev/null and 
b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/dbada9d56b10891ca136d80794bb93ad9e58293a-7 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/de357612a902159ac2a91a9cd6b0d7287d55e968-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/de357612a902159ac2a91a9cd6b0d7287d55e968-6 new file mode 100644 index 00000000000..e42b80007a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/de357612a902159ac2a91a9cd6b0d7287d55e968-6 @@ -0,0 +1 @@ +<557555555559 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/de7d905f4388e2955b0a11f42517882f0ff5792b-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/de7d905f4388e2955b0a11f42517882f0ff5792b-2 new file mode 100644 index 00000000000..5bba876961d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/de7d905f4388e2955b0a11f42517882f0ff5792b-2 @@ -0,0 +1 @@ +|u|l \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/decd27d54bcedc965463c545a051f44c140cfb6b-3 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/decd27d54bcedc965463c545a051f44c140cfb6b-3 new file mode 100644 index 00000000000..05a2d49b27a Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/decd27d54bcedc965463c545a051f44c140cfb6b-3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e0355b1d80ab87e740c6493abf4b7e532c89d3a6-2 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e0355b1d80ab87e740c6493abf4b7e532c89d3a6-2 new file mode 100644 index 00000000000..47d9fafbd17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e0355b1d80ab87e740c6493abf4b7e532c89d3a6-2 @@ -0,0 +1 @@ +55555555555555555555555555554045555555555555555015555555555555555555Sz_x_q_5 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e3b22b4eb898b68dde1c73dab4629c38025031db-5 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e3b22b4eb898b68dde1c73dab4629c38025031db-5 new file mode 100644 index 00000000000..051fbb7eb7f --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e3b22b4eb898b68dde1c73dab4629c38025031db-5 @@ -0,0 +1 @@ +c|pW s')0567383170319432u| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e55f990e122023ab58626ef27e06422b7c4be053 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e55f990e122023ab58626ef27e06422b7c4be053 new file mode 100644 index 00000000000..d0bba70de7b Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e55f990e122023ab58626ef27e06422b7c4be053 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e77d34eb148eb0a3b30feffc7e9b56cd182dc74c b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e77d34eb148eb0a3b30feffc7e9b56cd182dc74c new file mode 100644 index 00000000000..309beb4236d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/e77d34eb148eb0a3b30feffc7e9b56cd182dc74c @@ -0,0 +1 @@ 
+5555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e460155555555555555555555555555555545555555555555etProcessMemoryInfo5217012461354e4601555555555555555555555555555555 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/ed6e6eea47cde68b49dcadd93eb14cb6db432ff2-7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/ed6e6eea47cde68b49dcadd93eb14cb6db432ff2-7 new file mode 100644 index 00000000000..c110ac8631b Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/ed6e6eea47cde68b49dcadd93eb14cb6db432ff2-7 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f52fb64e8823fdebdb11ae1c231358fcce57e638-6 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f52fb64e8823fdebdb11ae1c231358fcce57e638-6 new file mode 100644 index 00000000000..b180e5c8510 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f52fb64e8823fdebdb11ae1c231358fcce57e638-6 @@ -0,0 +1 @@ +c|pW s%!(BADPREC)56383170319boo�lu \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f62ba7c4e99c6ba70e8f457f28ef45f797b9992a-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f62ba7c4e99c6ba70e8f457f28ef45f797b9992a-1 new file mode 100644 index 00000000000..671ee03a0cb Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f62ba7c4e99c6ba70e8f457f28ef45f797b9992a-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f748f012d8d9bd2b168aa96c529658761dd648a4-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f748f012d8d9bd2b168aa96c529658761dd648a4-1 new file mode 100644 index 00000000000..0a923867c72 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f748f012d8d9bd2b168aa96c529658761dd648a4-1 @@ -0,0 +1 @@ +515111512312578270211815834045410156250xD95782702118158340454101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABbeDddDfd9eF6C55511151231257827021181583404� 54101562555511151231257827021181583404541015625065072254514016141043235136515203622404016.0xfCE3BdA64ABf82208f6CDcE92fbE6 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f8f96538c274d717ccca38324c9aa658e67180dd-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f8f96538c274d717ccca38324c9aa658e67180dd-1 new file mode 100644 index 00000000000..1cf2d975750 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/f8f96538c274d717ccca38324c9aa658e67180dd-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fad75398a55edd191340909bf867610b6bc72ff7 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fad75398a55edd191340909bf867610b6bc72ff7 new file mode 100644 index 00000000000..cc2715c20a7 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fad75398a55edd191340909bf867610b6bc72ff7 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fb51f26fe62e769ba34f569454121f46f2a4d277-8 
b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fb51f26fe62e769ba34f569454121f46f2a4d277-8 new file mode 100644 index 00000000000..9f1ef19abbc --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fb51f26fe62e769ba34f569454121f46f2a4d277-8 @@ -0,0 +1 @@ +f|W s't)05678too few operands for format '%19432u2 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fca6bc86ec8711a1e7745c03237087d55d936a67 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fca6bc86ec8711a1e7745c03237087d55d936a67 new file mode 100644 index 00000000000..7de821dcb79 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fca6bc86ec8711a1e7745c03237087d55d936a67 @@ -0,0 +1 @@ +55555555555555555555555555555540455555555555555550155555555555555555555555555555545555555555555555555555555555554045555555555555555015555555555555555555555555555554 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fd155cfb10f96df4544263f5d2164351ae97f806-4 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fd155cfb10f96df4544263f5d2164351ae97f806-4 new file mode 100644 index 00000000000..8661a0a620a Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fd155cfb10f96df4544263f5d2164351ae97f806-4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fe5567e8d769550852182cdf69d74bb16dff8e29-1 b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fe5567e8d769550852182cdf69d74bb16dff8e29-1 new file mode 100644 index 00000000000..454f6b314bf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/compress/corpus/fe5567e8d769550852182cdf69d74bb16dff8e29-1 @@ -0,0 +1 @@ +0123456789abcdef \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/001e59af3c698337b5449d856ef447fd0c0b47ff b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/001e59af3c698337b5449d856ef447fd0c0b47ff new file mode 100644 index 00000000000..f39e43d33e6 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/001e59af3c698337b5449d856ef447fd0c0b47ff differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/03648029374f39fc083b90d39df1f80ecc1fa78e-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/03648029374f39fc083b90d39df1f80ecc1fa78e-1 new file mode 100644 index 00000000000..75ee5ed48cf Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/03648029374f39fc083b90d39df1f80ecc1fa78e-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/057294475853d7ff15d86d889f592eb7ba6b2e72-8 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/057294475853d7ff15d86d889f592eb7ba6b2e72-8 new file mode 100644 index 00000000000..853c9ec0629 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/057294475853d7ff15d86d889f592eb7ba6b2e72-8 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/067422cb1d02d21c8fc4d86a4b00c126d3f24699-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/067422cb1d02d21c8fc4d86a4b00c126d3f24699-2 new file mode 100644 index 00000000000..84f9fbd218f Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/067422cb1d02d21c8fc4d86a4b00c126d3f24699-2 differ 
diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0c33deff4510591c4ec94593a4d13f9d899523e4-5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0c33deff4510591c4ec94593a4d13f9d899523e4-5 new file mode 100644 index 00000000000..9997a2472b2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0c33deff4510591c4ec94593a4d13f9d899523e4-5 @@ -0,0 +1 @@ +oG5 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0d67d398462565f4d9e21f5efe9b0a6816e8962c-3 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0d67d398462565f4d9e21f5efe9b0a6816e8962c-3 new file mode 100644 index 00000000000..cbbe7d8c4d7 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0d67d398462565f4d9e21f5efe9b0a6816e8962c-3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0ead65857937e78706dcaf139c616ef71533b94d-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0ead65857937e78706dcaf139c616ef71533b94d-2 new file mode 100644 index 00000000000..c928bb19370 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/0ead65857937e78706dcaf139c616ef71533b94d-2 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/19aa2a437fa2a48199cc0294469a0a942ae1483a-4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/19aa2a437fa2a48199cc0294469a0a942ae1483a-4 new file mode 100644 index 00000000000..3a59d7fa3ca --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/19aa2a437fa2a48199cc0294469a0a942ae1483a-4 @@ -0,0 +1 @@ +t \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/1bf17f213e2aee50a14d920aa1c615edab1b4d11 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/1bf17f213e2aee50a14d920aa1c615edab1b4d11 new file mode 100644 index 00000000000..8b2941c1872 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/1bf17f213e2aee50a14d920aa1c615edab1b4d11 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/1fd68df0abe0dea0a5cf30153dcf48b5684a77de-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/1fd68df0abe0dea0a5cf30153dcf48b5684a77de-1 new file mode 100644 index 00000000000..fc5c08fdabb --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/1fd68df0abe0dea0a5cf30153dcf48b5684a77de-1 @@ -0,0 +1 @@ +g| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/26157894111c075cd978a0a67a1bd2c6184e92d6-3 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/26157894111c075cd978a0a67a1bd2c6184e92d6-3 new file mode 100644 index 00000000000..0d77c1bbcd0 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/26157894111c075cd978a0a67a1bd2c6184e92d6-3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/26cc3dcead75312b7037f0c81d2a88ea04cf2c48 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/26cc3dcead75312b7037f0c81d2a88ea04cf2c48 new file mode 100644 index 00000000000..4df3ef1b91e Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/26cc3dcead75312b7037f0c81d2a88ea04cf2c48 differ diff --git 
a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/31a1eb16673086976295a4042ccef7fa443503df-11 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/31a1eb16673086976295a4042ccef7fa443503df-11 new file mode 100644 index 00000000000..6a1d007f825 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/31a1eb16673086976295a4042ccef7fa443503df-11 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/326998db244ec22e4545688efc3fcf761edcfa47 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/326998db244ec22e4545688efc3fcf761edcfa47 new file mode 100644 index 00000000000..6993da704d8 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/326998db244ec22e4545688efc3fcf761edcfa47 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/32796a5dc0d50e6c16a3400af331c4a1788d2620-4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/32796a5dc0d50e6c16a3400af331c4a1788d2620-4 new file mode 100644 index 00000000000..0b8c80592bd --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/32796a5dc0d50e6c16a3400af331c4a1788d2620-4 @@ -0,0 +1 @@ +\ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/327ed0588eb0b927e81d9b9adfd8aa98a1a5081d-10 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/327ed0588eb0b927e81d9b9adfd8aa98a1a5081d-10 new file mode 100644 index 00000000000..f32f69a7211 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/327ed0588eb0b927e81d9b9adfd8aa98a1a5081d-10 @@ -0,0 +1 @@ +)$$ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/33844260640d4b3bc276376be9b5b5a1c53350cb-8 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/33844260640d4b3bc276376be9b5b5a1c53350cb-8 new file mode 100644 index 00000000000..d0ba7d667ed --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/33844260640d4b3bc276376be9b5b5a1c53350cb-8 @@ -0,0 +1 @@ +3 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3b3dc5be03fd492a33bd331979fb5498f5df1e52-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3b3dc5be03fd492a33bd331979fb5498f5df1e52-1 new file mode 100644 index 00000000000..acd8acb7109 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3b3dc5be03fd492a33bd331979fb5498f5df1e52-1 @@ -0,0 +1 @@ +%ḒoGhK5h2JSZܼi,Gsy;8s! 
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3d648fdd4eee536b735033f7eb135b28ca5c8526 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3d648fdd4eee536b735033f7eb135b28ca5c8526 new file mode 100644 index 00000000000..13471a03c9e Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3d648fdd4eee536b735033f7eb135b28ca5c8526 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3da89ee273be13437e7ecf760f3fbd4dc0e8d1fe-7 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3da89ee273be13437e7ecf760f3fbd4dc0e8d1fe-7 new file mode 100644 index 00000000000..d87b4e2b44e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3da89ee273be13437e7ecf760f3fbd4dc0e8d1fe-7 @@ -0,0 +1 @@ +) \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3e9b93aed9d65aab72e65351b433bce4a091ab0d b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3e9b93aed9d65aab72e65351b433bce4a091ab0d new file mode 100644 index 00000000000..3d0765d8514 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/3e9b93aed9d65aab72e65351b433bce4a091ab0d differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/40259a0dccc3648661894e1cb9d43ecb18c1e94f-4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/40259a0dccc3648661894e1cb9d43ecb18c1e94f-4 new file mode 100644 index 00000000000..2a39463f424 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/40259a0dccc3648661894e1cb9d43ecb18c1e94f-4 @@ -0,0 +1 @@ +|9(-{߼'0Qj \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/440ac3e84eb89ba7c864261c36567ec92f111be5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/440ac3e84eb89ba7c864261c36567ec92f111be5 new file mode 100644 index 00000000000..67409b6a0e7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/440ac3e84eb89ba7c864261c36567ec92f111be5 @@ -0,0 +1 @@ +G \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4a0e9c56d9b4d655b8c750e01b63aaee512c79dd b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4a0e9c56d9b4d655b8c750e01b63aaee512c79dd new file mode 100644 index 00000000000..5c2743e54fa --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4a0e9c56d9b4d655b8c750e01b63aaee512c79dd @@ -0,0 +1 @@ +5Asws7upΧ$ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4d92faede7356a8aa5c0fd7dc7df1cbf6ec7bc54-4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4d92faede7356a8aa5c0fd7dc7df1cbf6ec7bc54-4 new file mode 100644 index 00000000000..2a21c7e8a35 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4d92faede7356a8aa5c0fd7dc7df1cbf6ec7bc54-4 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4f480dad3c4e936864adaa7707bac06e59272d96 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4f480dad3c4e936864adaa7707bac06e59272d96 new file mode 100644 index 00000000000..8e3f1c87f09 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/4f480dad3c4e936864adaa7707bac06e59272d96 differ 
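The binary and text files added in these hunks are go-fuzz seed corpora vendored alongside the fse package's compress and decompress fuzz targets; they are opaque test inputs, not code. Purely as a hedged sketch (the harness actually shipped upstream may differ, and this exact file is not part of the diff), a go-fuzz target that a corpus like this would seed looks roughly as follows:

// Hedged sketch of a go-fuzz harness that corpora such as the ones above would seed.
// The fse calls (Compress/Decompress with a Scratch) follow the vendored package's API,
// but this harness itself is an illustration, not a file contained in this change.
package fuzz

import (
	"bytes"

	"github.com/klauspost/compress/fse"
)

// Fuzz is the entry point go-fuzz looks for; it returns 1 when the input is
// interesting enough to keep in the corpus and 0 otherwise.
func Fuzz(data []byte) int {
	comp, err := fse.Compress(data, &fse.Scratch{})
	if err != nil {
		// fse rejects incompressible or single-symbol inputs; those are not interesting.
		return 0
	}
	dec, err := fse.Decompress(comp, &fse.Scratch{})
	if err != nil {
		panic(err) // data we just compressed must decompress cleanly
	}
	if !bytes.Equal(dec, data) {
		panic("fse round-trip mismatch")
	}
	return 1
}

The remaining corpus hunks below are more seed inputs of the same kind; their file names appear to be content hashes assigned by the fuzzer.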
diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/500ce5edbda324f9a385f06a24094cee873bcf07-6 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/500ce5edbda324f9a385f06a24094cee873bcf07-6 new file mode 100644 index 00000000000..7e1fe91c4ec --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/500ce5edbda324f9a385f06a24094cee873bcf07-6 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5135c69c6aca6f4aad9cd65660398fc3a7bce387-5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5135c69c6aca6f4aad9cd65660398fc3a7bce387-5 new file mode 100644 index 00000000000..71c7a0184b1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5135c69c6aca6f4aad9cd65660398fc3a7bce387-5 @@ -0,0 +1 @@ +P \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/545816fc75823315f58483bdc08de39b4b26f487-6 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/545816fc75823315f58483bdc08de39b4b26f487-6 new file mode 100644 index 00000000000..45e5287d195 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/545816fc75823315f58483bdc08de39b4b26f487-6 @@ -0,0 +1 @@ +t \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/54d1f293ba75bf6ff72e8bf18fe15b00e590e2c8-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/54d1f293ba75bf6ff72e8bf18fe15b00e590e2c8-1 new file mode 100644 index 00000000000..4b539d3b2c3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/54d1f293ba75bf6ff72e8bf18fe15b00e590e2c8-1 @@ -0,0 +1 @@ +S" %'؎ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/555d303800fd439e509e0720e6d7cf04f744ec2f-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/555d303800fd439e509e0720e6d7cf04f744ec2f-2 new file mode 100644 index 00000000000..f774831a5ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/555d303800fd439e509e0720e6d7cf04f744ec2f-2 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/55a0c930a804665fd0ae2aa914e39ec2898babaf-7 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/55a0c930a804665fd0ae2aa914e39ec2898babaf-7 new file mode 100644 index 00000000000..177989c1ec4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/55a0c930a804665fd0ae2aa914e39ec2898babaf-7 @@ -0,0 +1 @@ +v$ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/56032dcc54f0d2fa736dd71962caa4e3e240abf0 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/56032dcc54f0d2fa736dd71962caa4e3e240abf0 new file mode 100644 index 00000000000..111b958427e Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/56032dcc54f0d2fa736dd71962caa4e3e240abf0 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/566fbce2e12fd9efdb9165b796da786e66bf036c-9 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/566fbce2e12fd9efdb9165b796da786e66bf036c-9 new file mode 100644 index 00000000000..a335ece431e --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/566fbce2e12fd9efdb9165b796da786e66bf036c-9 @@ 
-0,0 +1 @@ +)$$ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/576280c2b993d7a52ac7c741273336f8bd9213f5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/576280c2b993d7a52ac7c741273336f8bd9213f5 new file mode 100644 index 00000000000..8b2c799921c Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/576280c2b993d7a52ac7c741273336f8bd9213f5 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/577c882a297cbdf4233695231e12accbcc2abdcd-4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/577c882a297cbdf4233695231e12accbcc2abdcd-4 new file mode 100644 index 00000000000..fa726d05147 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/577c882a297cbdf4233695231e12accbcc2abdcd-4 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/59d26deac1ea4b1ad0e2d65b20839f7d2d4a4b89 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/59d26deac1ea4b1ad0e2d65b20839f7d2d4a4b89 new file mode 100644 index 00000000000..ed76f77db1f Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/59d26deac1ea4b1ad0e2d65b20839f7d2d4a4b89 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5bf1f7b62d3967f98c64b19a4b2c0d61b70e6222-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5bf1f7b62d3967f98c64b19a4b2c0d61b70e6222-2 new file mode 100644 index 00000000000..f6da6a9a2f7 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5bf1f7b62d3967f98c64b19a4b2c0d61b70e6222-2 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5c795ad342ff2a0f69ada3f3382ba05013387264-8 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5c795ad342ff2a0f69ada3f3382ba05013387264-8 new file mode 100644 index 00000000000..4458a78bd24 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/5c795ad342ff2a0f69ada3f3382ba05013387264-8 @@ -0,0 +1 @@ +$ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/64426d08b2cefc746ec73d04daae35a27dd53ccf b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/64426d08b2cefc746ec73d04daae35a27dd53ccf new file mode 100644 index 00000000000..70357807408 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/64426d08b2cefc746ec73d04daae35a27dd53ccf @@ -0,0 +1 @@ +P&d. 
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6548f73fac595761f3cf335b35d1ff524d67f09f-5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6548f73fac595761f3cf335b35d1ff524d67f09f-5 new file mode 100644 index 00000000000..f3fa26756f8 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6548f73fac595761f3cf335b35d1ff524d67f09f-5 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/68d516dd85ef75af6a2cd31394eebab11b024fe4-3 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/68d516dd85ef75af6a2cd31394eebab11b024fe4-3 new file mode 100644 index 00000000000..811db0c88f8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/68d516dd85ef75af6a2cd31394eebab11b024fe4-3 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/69c449d595dc232b2ad88b0b48395948a23a0650-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/69c449d595dc232b2ad88b0b48395948a23a0650-2 new file mode 100644 index 00000000000..865995f6f09 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/69c449d595dc232b2ad88b0b48395948a23a0650-2 @@ -0,0 +1 @@ +|CO \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b50366cad8565e61d162e38cf031c918d305dd3 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b50366cad8565e61d162e38cf031c918d305dd3 new file mode 100644 index 00000000000..9fafedce925 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b50366cad8565e61d162e38cf031c918d305dd3 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b93512fc409ba81e69a6c818c47540e47dcb9a4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b93512fc409ba81e69a6c818c47540e47dcb9a4 new file mode 100644 index 00000000000..81f5c356f56 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b93512fc409ba81e69a6c818c47540e47dcb9a4 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b99950532c007f8f24725686d8142ddf390889b b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b99950532c007f8f24725686d8142ddf390889b new file mode 100644 index 00000000000..0b75ab3883e Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6b99950532c007f8f24725686d8142ddf390889b differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6ce2e7227237a64b8bc0b5c27f018bfd2a2227f2-5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6ce2e7227237a64b8bc0b5c27f018bfd2a2227f2-5 new file mode 100644 index 00000000000..c6b0df05ca0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6ce2e7227237a64b8bc0b5c27f018bfd2a2227f2-5 @@ -0,0 +1 @@ +| \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6e53664c27b28370ee8680e4e4dfd139b43c2850-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6e53664c27b28370ee8680e4e4dfd139b43c2850-2 new file mode 100644 index 00000000000..4bbe3863bf5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6e53664c27b28370ee8680e4e4dfd139b43c2850-2 @@ -0,0 +1,2 @@ +AF;xs{ +K  \ No newline at end of file diff --git 
a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6eb5eee33ab96304ffc66043a961108878b0565f-9 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6eb5eee33ab96304ffc66043a961108878b0565f-9 new file mode 100644 index 00000000000..2d1ee9e4f24 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6eb5eee33ab96304ffc66043a961108878b0565f-9 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6ebb78761afbe6429fd06e8f0f9394cc8e061c59-6 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6ebb78761afbe6429fd06e8f0f9394cc8e061c59-6 new file mode 100644 index 00000000000..0737403ea95 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/6ebb78761afbe6429fd06e8f0f9394cc8e061c59-6 @@ -0,0 +1 @@ +4 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/72677d19506938afc885d16fd5ca7c4be75f3752 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/72677d19506938afc885d16fd5ca7c4be75f3752 new file mode 100644 index 00000000000..ed8c6b50647 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/72677d19506938afc885d16fd5ca7c4be75f3752 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/747d7c993787ae22ace6ec322dc1f098c938ec1f b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/747d7c993787ae22ace6ec322dc1f098c938ec1f new file mode 100644 index 00000000000..d37c3272249 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/747d7c993787ae22ace6ec322dc1f098c938ec1f differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/75262a4c69622c9103675c86a5e6a4a3688ed3e6-8 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/75262a4c69622c9103675c86a5e6a4a3688ed3e6-8 new file mode 100644 index 00000000000..b5a4227d142 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/75262a4c69622c9103675c86a5e6a4a3688ed3e6-8 @@ -0,0 +1 @@ + 76 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/764ca341b24c6a68ad9507a31abcc33287f6c994-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/764ca341b24c6a68ad9507a31abcc33287f6c994-1 new file mode 100644 index 00000000000..d520c282f36 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/764ca341b24c6a68ad9507a31abcc33287f6c994-1 @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/7b56b7ac07966b8060919498d93a47e68730d20a-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/7b56b7ac07966b8060919498d93a47e68730d20a-2 new file mode 100644 index 00000000000..16fd1ce84a7 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/7b56b7ac07966b8060919498d93a47e68730d20a-2 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/7bbcc6e3a846cb3bdfe20a0c4d34f30985ab87d3-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/7bbcc6e3a846cb3bdfe20a0c4d34f30985ab87d3-1 new file mode 100644 index 00000000000..1aa43819c4d --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/7bbcc6e3a846cb3bdfe20a0c4d34f30985ab87d3-1 @@ -0,0 +1 @@ +_!8L4Qժ?w(&R@´ 6ƹ\$=lƹP@6B)-=<䬣` \ No newline at end of file diff 
--git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/8c562007589132e496325ea892733466e03a06cb b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/8c562007589132e496325ea892733466e03a06cb new file mode 100644 index 00000000000..4440cd5e455 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/8c562007589132e496325ea892733466e03a06cb differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/8e78a58162034efce763c158087c6f1e427c1260 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/8e78a58162034efce763c158087c6f1e427c1260 new file mode 100644 index 00000000000..b6b0522b6bd Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/8e78a58162034efce763c158087c6f1e427c1260 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9278d66afee65127b44cdd505e55580e1faa2377-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9278d66afee65127b44cdd505e55580e1faa2377-1 new file mode 100644 index 00000000000..c874c8c51a3 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9278d66afee65127b44cdd505e55580e1faa2377-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9300f0f580a7b8b7cb3a7163a48ed0771c72fb1b b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9300f0f580a7b8b7cb3a7163a48ed0771c72fb1b new file mode 100644 index 00000000000..c5cc8a95749 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9300f0f580a7b8b7cb3a7163a48ed0771c72fb1b differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9802e09660b74c259960141c89402b5fdececf3f b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9802e09660b74c259960141c89402b5fdececf3f new file mode 100644 index 00000000000..deb80bbd075 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9802e09660b74c259960141c89402b5fdececf3f differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/98840eab4f9516cedcdcc88302b3639672a42f06-2 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/98840eab4f9516cedcdcc88302b3639672a42f06-2 new file mode 100644 index 00000000000..a4ab6b04a1a --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/98840eab4f9516cedcdcc88302b3639672a42f06-2 @@ -0,0 +1 @@ +G \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9a3d36ff249d8734de00dcaec41bcbf160adaecf-10 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9a3d36ff249d8734de00dcaec41bcbf160adaecf-10 new file mode 100644 index 00000000000..5d8918d0de9 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9a3d36ff249d8734de00dcaec41bcbf160adaecf-10 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9cd2374f626d4c55ce9389368b428a74be518319 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9cd2374f626d4c55ce9389368b428a74be518319 new file mode 100644 index 00000000000..bad7e46413d Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9cd2374f626d4c55ce9389368b428a74be518319 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9e6953ce446955eed1bee2d9819461068d2d37f6 
b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9e6953ce446955eed1bee2d9819461068d2d37f6 new file mode 100644 index 00000000000..16819ce5891 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/9e6953ce446955eed1bee2d9819461068d2d37f6 @@ -0,0 +1 @@ +?AP \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/a3953bd6a594600ab51268955ecee6fea8be4709-8 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/a3953bd6a594600ab51268955ecee6fea8be4709-8 new file mode 100644 index 00000000000..fcc0a2ed42f --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/a3953bd6a594600ab51268955ecee6fea8be4709-8 @@ -0,0 +1 @@ +3  \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/a61e7f9a73cdb1f8e21d1cc341429f15e1db3172-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/a61e7f9a73cdb1f8e21d1cc341429f15e1db3172-1 new file mode 100644 index 00000000000..8acab45d846 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/a61e7f9a73cdb1f8e21d1cc341429f15e1db3172-1 @@ -0,0 +1 @@ +% }6]  \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/ae1137dce39774d2ba19b77b1fc915c57ba95523-3 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/ae1137dce39774d2ba19b77b1fc915c57ba95523-3 new file mode 100644 index 00000000000..576f9593a65 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/ae1137dce39774d2ba19b77b1fc915c57ba95523-3 @@ -0,0 +1 @@ +@A<2 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b13d2b8e3724d1787d2105c1a4e3220e1e690339-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b13d2b8e3724d1787d2105c1a4e3220e1e690339-1 new file mode 100644 index 00000000000..deb3d2d03e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b13d2b8e3724d1787d2105c1a4e3220e1e690339-1 @@ -0,0 +1 @@ +r!ɦI7? 
`dswU&JL(7$ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b1dc3183c3a0c093dd7b36bb8ef2a01c20e45635-4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b1dc3183c3a0c093dd7b36bb8ef2a01c20e45635-4 new file mode 100644 index 00000000000..02d78c8a01f --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b1dc3183c3a0c093dd7b36bb8ef2a01c20e45635-4 @@ -0,0 +1 @@ +G|7] \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b1f5380ef7c0a96f102e2ac2fed5e4a227a0d9f4 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b1f5380ef7c0a96f102e2ac2fed5e4a227a0d9f4 new file mode 100644 index 00000000000..68b1c2b8dfa --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b1f5380ef7c0a96f102e2ac2fed5e4a227a0d9f4 @@ -0,0 +1 @@ +A~sq+0wyFM \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b4b2324b290ec96791a1ad910fd62d8b6d5b32cc-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b4b2324b290ec96791a1ad910fd62d8b6d5b32cc-1 new file mode 100644 index 00000000000..6e5c61392e4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b4b2324b290ec96791a1ad910fd62d8b6d5b32cc-1 @@ -0,0 +1,2 @@ +-xEW’H ZAV!h[ٷ& +͇IcʨM2 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b60c99af5868c013f98cdce9781b4fffeed81f30 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b60c99af5868c013f98cdce9781b4fffeed81f30 new file mode 100644 index 00000000000..a3874c6426a Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b60c99af5868c013f98cdce9781b4fffeed81f30 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b62b1d73848fde9b74a166239cef1a76506a9e41-1 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b62b1d73848fde9b74a166239cef1a76506a9e41-1 new file mode 100644 index 00000000000..e40482bb813 Binary files /dev/null and b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/b62b1d73848fde9b74a166239cef1a76506a9e41-1 differ diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/bedb6456c65af2ca319c6630db22f2e362dd73f2-5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/bedb6456c65af2ca319c6630db22f2e362dd73f2-5 new file mode 100644 index 00000000000..1be3f79cdf3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/bedb6456c65af2ca319c6630db22f2e362dd73f2-5 @@ -0,0 +1 @@ +V \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c0a4a69dff77a11d744507864de0a15d000afd3f-5 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c0a4a69dff77a11d744507864de0a15d000afd3f-5 new file mode 100644 index 00000000000..fe2ec6225c3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c0a4a69dff77a11d744507864de0a15d000afd3f-5 @@ -0,0 +1 @@ +)tZ럗EdEs/@& \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c2d596d0601332a809c19c0347580c78b079f822-8 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c2d596d0601332a809c19c0347580c78b079f822-8 new file mode 100644 index 00000000000..69531c19aaf --- /dev/null +++ 
b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c2d596d0601332a809c19c0347580c78b079f822-8 @@ -0,0 +1 @@ +$$$ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c3d814235d50a4f96ced41bf5fc9c53c4e408765 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c3d814235d50a4f96ced41bf5fc9c53c4e408765 new file mode 100644 index 00000000000..198232f6067 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c3d814235d50a4f96ced41bf5fc9c53c4e408765 @@ -0,0 +1 @@ +"O|LP;_(<7CP](G#\A_ٮK" \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c43bfe87577eecc822788ffeb9dadf2cd7b8d2f0 b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c43bfe87577eecc822788ffeb9dadf2cd7b8d2f0 new file mode 100644 index 00000000000..7349f4bfe9c --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c43bfe87577eecc822788ffeb9dadf2cd7b8d2f0 @@ -0,0 +1 @@ +aG43FMuuxo /!in;. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c7198f2cc1dd9bffea474e6eb4b189ecdf2f965d b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c7198f2cc1dd9bffea474e6eb4b189ecdf2f965d new file mode 100644 index 00000000000..d93258404af --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fuzz/decompress/corpus/c7198f2cc1dd9bffea474e6eb4b189ecdf2f965d @@ -0,0 +1 @@ +#_!= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needConv = true + } + if z.buf[i] == 0 { + // Digest covers the NUL terminator. + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) + + // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). + if needConv { + s := make([]rune, 0, i) + for _, v := range z.buf[:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[:i]), nil + } + } +} + +// readHeader reads the GZIP header according to section 2.3.1. +// This method does not set z.err. +func (z *Reader) readHeader() (hdr Header, err error) { + if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil { + // RFC 1952, section 2.2, says the following: + // A gzip file consists of a series of "members" (compressed data sets). + // + // Other than this, the specification does not clarify whether a + // "series" is defined as "one or more" or "zero or more". To err on the + // side of caution, Go interprets this to mean "zero or more". + // Thus, it is okay to return io.EOF here. + return hdr, err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return hdr, ErrHeader + } + flg := z.buf[3] + hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0) + // z.buf[8] is XFL and is currently ignored. 
+ hdr.OS = z.buf[9] + z.digest = crc32.ChecksumIEEE(z.buf[:10]) + + if flg&flagExtra != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2]) + data := make([]byte, le.Uint16(z.buf[:2])) + if _, err = io.ReadFull(z.r, data); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, data) + hdr.Extra = data + } + + var s string + if flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Name = s + } + + if flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Comment = s + } + + if flg&flagHdrCrc != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + digest := le.Uint16(z.buf[:2]) + if digest != uint16(z.digest) { + return hdr, ErrHeader + } + } + + z.digest = 0 + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } + return hdr, nil +} + +// Read implements io.Reader, reading uncompressed bytes from its underlying Reader. +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } + + // Read from next file, if necessary. + if n > 0 { + return n, nil + } + return z.Read(p) +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (z *Reader) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + crcWriter := crc32.NewIEEE() + for { + if z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + + // We write both to output and digest. + mw := io.MultiWriter(w, crcWriter) + n, err := z.decompressor.(io.WriterTo).WriteTo(mw) + total += n + z.size += uint32(n) + if err != nil { + z.err = err + return total, z.err + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return total, err + } + z.digest = crcWriter.Sum32() + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return total, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return total, nil + } + crcWriter.Reset() + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +// In order for the GZIP checksum to be verified, the reader must be +// fully consumed until the io.EOF. 
+func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip_test.go b/vendor/github.com/klauspost/compress/gzip/gunzip_test.go new file mode 100644 index 00000000000..bc7ca923d0c --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gunzip_test.go @@ -0,0 +1,716 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "bytes" + oldgz "compress/gzip" + "crypto/rand" + "io" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/klauspost/compress/flate" +) + +type gunzipTest struct { + name string + desc string + raw string + gzip []byte + err error +} + +var gunzipTests = []gunzipTest{ + { // has 1 empty fixed-huffman block + "empty.txt", + "empty.txt", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xf7, 0x5e, 0x14, 0x4a, + 0x00, 0x03, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + nil, + }, + { + "", + "empty - with no file name", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, + 0x00, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block + "hello.txt", + "hello.txt", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // concatenation + "hello.txt", + "hello.txt x2", + "hello world\n" + + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // has a fixed huffman block with some length-distance pairs + "shesells.txt", + "shesells.txt", + "she sells seashells by the seashore\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x72, 0x66, 0x8b, 0x4a, + 0x00, 0x03, 0x73, 0x68, 0x65, 0x73, 0x65, 0x6c, + 0x6c, 0x73, 0x2e, 0x74, 0x78, 0x74, 0x00, 0x2b, + 0xce, 0x48, 0x55, 0x28, 0x4e, 0xcd, 0xc9, 0x29, + 0x06, 0x92, 0x89, 0xc5, 0x19, 0x60, 0x56, 0x52, + 0xa5, 0x42, 0x09, 0x58, 0x18, 0x28, 0x90, 0x5f, + 0x94, 0xca, 0x05, 0x00, 0x76, 0xb0, 0x3b, 0xeb, + 0x24, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has dynamic huffman blocks + "gettysburg", + "gettysburg", + " Four score and seven years ago our fathers brought forth on\n" + + "this continent, a new nation, conceived in Liberty, and dedicated\n" + + "to the proposition that all men are created equal.\n" + + " Now we are engaged in a great Civil War, testing whether that\n" + + "nation, or any nation so conceived and so dedicated, can long\n" + + "endure.\n" + + " We are met on a great battle-field of that war.\n" + + " We have come to dedicate a portion of that field, as a final\n" + + "resting place for those who here gave their lives that that\n" + + "nation might live. 
It is altogether fitting and proper that\n" + + "we should do this.\n" + + " But, in a larger sense, we can not dedicate — we can not\n" + + "consecrate — we can not hallow — this ground.\n" + + " The brave men, living and dead, who struggled here, have\n" + + "consecrated it, far above our poor power to add or detract.\n" + + "The world will little note, nor long remember what we say here,\n" + + "but it can never forget what they did here.\n" + + " It is for us the living, rather, to be dedicated here to the\n" + + "unfinished work which they who fought here have thus far so\n" + + "nobly advanced. It is rather for us to be here dedicated to\n" + + "the great task remaining before us — that from these honored\n" + + "dead we take increased devotion to that cause for which they\n" + + "gave the last full measure of devotion —\n" + + " that we here highly resolve that these dead shall not have\n" + + "died in vain — that this nation, under God, shall have a new\n" + + "birth of freedom — and that government of the people, by the\n" + + "people, for the people, shall not perish from this earth.\n" + + "\n" + + "Abraham Lincoln, November 19, 1863, Gettysburg, Pennsylvania\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xd1, 0x12, 0x2b, 0x4a, + 0x00, 0x03, 0x67, 0x65, 0x74, 0x74, 0x79, 0x73, + 0x62, 0x75, 0x72, 0x67, 0x00, 0x65, 0x54, 0xcd, + 0x6e, 0xd4, 0x30, 0x10, 0xbe, 0xfb, 0x29, 0xe6, + 0x01, 0x42, 0xa5, 0x0a, 0x09, 0xc1, 0x11, 0x90, + 0x40, 0x48, 0xa8, 0xe2, 0x80, 0xd4, 0xf3, 0x24, + 0x9e, 0x24, 0x56, 0xbd, 0x9e, 0xc5, 0x76, 0x76, + 0x95, 0x1b, 0x0f, 0xc1, 0x13, 0xf2, 0x24, 0x7c, + 0x63, 0x77, 0x9b, 0x4a, 0x5c, 0xaa, 0x6e, 0x6c, + 0xcf, 0x7c, 0x7f, 0x33, 0x44, 0x5f, 0x74, 0xcb, + 0x54, 0x26, 0xcd, 0x42, 0x9c, 0x3c, 0x15, 0xb9, + 0x48, 0xa2, 0x5d, 0x38, 0x17, 0xe2, 0x45, 0xc9, + 0x4e, 0x67, 0xae, 0xab, 0xe0, 0xf7, 0x98, 0x75, + 0x5b, 0xd6, 0x4a, 0xb3, 0xe6, 0xba, 0x92, 0x26, + 0x57, 0xd7, 0x50, 0x68, 0xd2, 0x54, 0x43, 0x92, + 0x54, 0x07, 0x62, 0x4a, 0x72, 0xa5, 0xc4, 0x35, + 0x68, 0x1a, 0xec, 0x60, 0x92, 0x70, 0x11, 0x4f, + 0x21, 0xd1, 0xf7, 0x30, 0x4a, 0xae, 0xfb, 0xd0, + 0x9a, 0x78, 0xf1, 0x61, 0xe2, 0x2a, 0xde, 0x55, + 0x25, 0xd4, 0xa6, 0x73, 0xd6, 0xb3, 0x96, 0x60, + 0xef, 0xf0, 0x9b, 0x2b, 0x71, 0x8c, 0x74, 0x02, + 0x10, 0x06, 0xac, 0x29, 0x8b, 0xdd, 0x25, 0xf9, + 0xb5, 0x71, 0xbc, 0x73, 0x44, 0x0f, 0x7a, 0xa5, + 0xab, 0xb4, 0x33, 0x49, 0x0b, 0x2f, 0xbd, 0x03, + 0xd3, 0x62, 0x17, 0xe9, 0x73, 0xb8, 0x84, 0x48, + 0x8f, 0x9c, 0x07, 0xaa, 0x52, 0x00, 0x6d, 0xa1, + 0xeb, 0x2a, 0xc6, 0xa0, 0x95, 0x76, 0x37, 0x78, + 0x9a, 0x81, 0x65, 0x7f, 0x46, 0x4b, 0x45, 0x5f, + 0xe1, 0x6d, 0x42, 0xe8, 0x01, 0x13, 0x5c, 0x38, + 0x51, 0xd4, 0xb4, 0x38, 0x49, 0x7e, 0xcb, 0x62, + 0x28, 0x1e, 0x3b, 0x82, 0x93, 0x54, 0x48, 0xf1, + 0xd2, 0x7d, 0xe4, 0x5a, 0xa3, 0xbc, 0x99, 0x83, + 0x44, 0x4f, 0x3a, 0x77, 0x36, 0x57, 0xce, 0xcf, + 0x2f, 0x56, 0xbe, 0x80, 0x90, 0x9e, 0x84, 0xea, + 0x51, 0x1f, 0x8f, 0xcf, 0x90, 0xd4, 0x60, 0xdc, + 0x5e, 0xb4, 0xf7, 0x10, 0x0b, 0x26, 0xe0, 0xff, + 0xc4, 0xd1, 0xe5, 0x67, 0x2e, 0xe7, 0xc8, 0x93, + 0x98, 0x05, 0xb8, 0xa8, 0x45, 0xc0, 0x4d, 0x09, + 0xdc, 0x84, 0x16, 0x2b, 0x0d, 0x9a, 0x21, 0x53, + 0x04, 0x8b, 0xd2, 0x0b, 0xbd, 0xa2, 0x4c, 0xa7, + 0x60, 0xee, 0xd9, 0xe1, 0x1d, 0xd1, 0xb7, 0x4a, + 0x30, 0x8f, 0x63, 0xd5, 0xa5, 0x8b, 0x33, 0x87, + 0xda, 0x1a, 0x18, 0x79, 0xf3, 0xe3, 0xa6, 0x17, + 0x94, 0x2e, 0xab, 0x6e, 0xa0, 0xe3, 0xcd, 0xac, + 0x50, 0x8c, 0xca, 0xa7, 0x0d, 0x76, 0x37, 0xd1, + 0x23, 0xe7, 0x05, 0x57, 0x8b, 0xa4, 0x22, 0x83, + 0xd9, 0x62, 0x52, 0x25, 0xad, 0x07, 0xbb, 0xbf, + 
0xbf, 0xff, 0xbc, 0xfa, 0xee, 0x20, 0x73, 0x91, + 0x29, 0xff, 0x7f, 0x02, 0x71, 0x62, 0x84, 0xb5, + 0xf6, 0xb5, 0x25, 0x6b, 0x41, 0xde, 0x92, 0xb7, + 0x76, 0x3f, 0x91, 0x91, 0x31, 0x1b, 0x41, 0x84, + 0x62, 0x30, 0x0a, 0x37, 0xa4, 0x5e, 0x18, 0x3a, + 0x99, 0x08, 0xa5, 0xe6, 0x6d, 0x59, 0x22, 0xec, + 0x33, 0x39, 0x86, 0x26, 0xf5, 0xab, 0x66, 0xc8, + 0x08, 0x20, 0xcf, 0x0c, 0xd7, 0x47, 0x45, 0x21, + 0x0b, 0xf6, 0x59, 0xd5, 0xfe, 0x5c, 0x8d, 0xaa, + 0x12, 0x7b, 0x6f, 0xa1, 0xf0, 0x52, 0x33, 0x4f, + 0xf5, 0xce, 0x59, 0xd3, 0xab, 0x66, 0x10, 0xbf, + 0x06, 0xc4, 0x31, 0x06, 0x73, 0xd6, 0x80, 0xa2, + 0x78, 0xc2, 0x45, 0xcb, 0x03, 0x65, 0x39, 0xc9, + 0x09, 0xd1, 0x06, 0x04, 0x33, 0x1a, 0x5a, 0xf1, + 0xde, 0x01, 0xb8, 0x71, 0x83, 0xc4, 0xb5, 0xb3, + 0xc3, 0x54, 0x65, 0x33, 0x0d, 0x5a, 0xf7, 0x9b, + 0x90, 0x7c, 0x27, 0x1f, 0x3a, 0x58, 0xa3, 0xd8, + 0xfd, 0x30, 0x5f, 0xb7, 0xd2, 0x66, 0xa2, 0x93, + 0x1c, 0x28, 0xb7, 0xe9, 0x1b, 0x0c, 0xe1, 0x28, + 0x47, 0x26, 0xbb, 0xe9, 0x7d, 0x7e, 0xdc, 0x96, + 0x10, 0x92, 0x50, 0x56, 0x7c, 0x06, 0xe2, 0x27, + 0xb4, 0x08, 0xd3, 0xda, 0x7b, 0x98, 0x34, 0x73, + 0x9f, 0xdb, 0xf6, 0x62, 0xed, 0x31, 0x41, 0x13, + 0xd3, 0xa2, 0xa8, 0x4b, 0x3a, 0xc6, 0x1d, 0xe4, + 0x2f, 0x8c, 0xf8, 0xfb, 0x97, 0x64, 0xf4, 0xb6, + 0x2f, 0x80, 0x5a, 0xf3, 0x56, 0xe0, 0x40, 0x50, + 0xd5, 0x19, 0xd0, 0x1e, 0xfc, 0xca, 0xe5, 0xc9, + 0xd4, 0x60, 0x00, 0x81, 0x2e, 0xa3, 0xcc, 0xb6, + 0x52, 0xf0, 0xb4, 0xdb, 0x69, 0x99, 0xce, 0x7a, + 0x32, 0x4c, 0x08, 0xed, 0xaa, 0x10, 0x10, 0xe3, + 0x6f, 0xee, 0x99, 0x68, 0x95, 0x9f, 0x04, 0x71, + 0xb2, 0x49, 0x2f, 0x62, 0xa6, 0x5e, 0xb4, 0xef, + 0x02, 0xed, 0x4f, 0x27, 0xde, 0x4a, 0x0f, 0xfd, + 0xc1, 0xcc, 0xdd, 0x02, 0x8f, 0x08, 0x16, 0x54, + 0xdf, 0xda, 0xca, 0xe0, 0x82, 0xf1, 0xb4, 0x31, + 0x7a, 0xa9, 0x81, 0xfe, 0x90, 0xb7, 0x3e, 0xdb, + 0xd3, 0x35, 0xc0, 0x20, 0x80, 0x33, 0x46, 0x4a, + 0x63, 0xab, 0xd1, 0x0d, 0x29, 0xd2, 0xe2, 0x84, + 0xb8, 0xdb, 0xfa, 0xe9, 0x89, 0x44, 0x86, 0x7c, + 0xe8, 0x0b, 0xe6, 0x02, 0x6a, 0x07, 0x9b, 0x96, + 0xd0, 0xdb, 0x2e, 0x41, 0x4c, 0xa1, 0xd5, 0x57, + 0x45, 0x14, 0xfb, 0xe3, 0xa6, 0x72, 0x5b, 0x87, + 0x6e, 0x0c, 0x6d, 0x5b, 0xce, 0xe0, 0x2f, 0xe2, + 0x21, 0x81, 0x95, 0xb0, 0xe8, 0xb6, 0x32, 0x0b, + 0xb2, 0x98, 0x13, 0x52, 0x5d, 0xfb, 0xec, 0x63, + 0x17, 0x8a, 0x9e, 0x23, 0x22, 0x36, 0xee, 0xcd, + 0xda, 0xdb, 0xcf, 0x3e, 0xf1, 0xc7, 0xf1, 0x01, + 0x12, 0x93, 0x0a, 0xeb, 0x6f, 0xf2, 0x02, 0x15, + 0x96, 0x77, 0x5d, 0xef, 0x9c, 0xfb, 0x88, 0x91, + 0x59, 0xf9, 0x84, 0xdd, 0x9b, 0x26, 0x8d, 0x80, + 0xf9, 0x80, 0x66, 0x2d, 0xac, 0xf7, 0x1f, 0x06, + 0xba, 0x7f, 0xff, 0xee, 0xed, 0x40, 0x5f, 0xa5, + 0xd6, 0xbd, 0x8c, 0x5b, 0x46, 0xd2, 0x7e, 0x48, + 0x4a, 0x65, 0x8f, 0x08, 0x42, 0x60, 0xf7, 0x0f, + 0xb9, 0x16, 0x0b, 0x0c, 0x1a, 0x06, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block then garbage + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, 'g', 'a', 'r', 'b', 'a', 'g', 'e', '!', '!', '!', + }, + ErrHeader, + }, + { // has 1 non-empty fixed huffman block not enough header + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 
0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, gzipID1, + }, + io.ErrUnexpectedEOF, + }, + { // has 1 non-empty fixed huffman block but corrupt checksum + "hello.txt", + "hello.txt + corrupt checksum", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, + { // has 1 non-empty fixed huffman block but corrupt size + "hello.txt", + "hello.txt + corrupt size", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0xff, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, + { + "f1l3n4m3.tXt", + "header with all fields used", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x1e, 0x70, 0xf0, 0xf9, 0x4a, + 0x00, 0xaa, 0x09, 0x00, 0x7a, 0x7a, 0x05, 0x00, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x31, 0x6c, + 0x33, 0x6e, 0x34, 0x6d, 0x33, 0x2e, 0x74, 0x58, + 0x74, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, + 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, + 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, + 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, + 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, + 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, + 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, + 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, + 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, + 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, + 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, + 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, + 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, + 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, + 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, + 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, + 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, + 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, + 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, + 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, + 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, + 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, + 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, + 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, + 0xff, 0x00, 0x92, 0xfd, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, + }, + nil, + }, + { + "", + "truncated gzip file amid raw-block", + "hello", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f, + }, + io.ErrUnexpectedEOF, + }, + { + "", + "truncated gzip file amid fixed-block", + "He", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xf2, 0x48, 0xcd, + }, + io.ErrUnexpectedEOF, + }, +} + +func TestDecompressor(t *testing.T) { + b := new(bytes.Buffer) + for _, tt := range gunzipTests { + in := bytes.NewReader(tt.gzip) + gzip, err := NewReader(in) + if err != 
nil { + t.Errorf("%s: NewReader: %s", tt.name, err) + continue + } + defer gzip.Close() + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err := io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s := b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + + // Test Reader Reset. + in = bytes.NewReader(tt.gzip) + err = gzip.Reset(in) + if err != nil { + t.Errorf("%s: Reset: %s", tt.name, err) + continue + } + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err = io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s = b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + } +} + +func TestIssue6550(t *testing.T) { + f, err := os.Open("testdata/issue6550.gz") + if err != nil { + t.Fatal(err) + } + gzip, err := NewReader(f) + if err != nil { + t.Fatalf("NewReader(testdata/issue6550.gz): %v", err) + } + defer gzip.Close() + done := make(chan bool, 1) + go func() { + _, err := io.Copy(ioutil.Discard, gzip) + if err == nil { + t.Errorf("Copy succeeded") + } else { + t.Logf("Copy failed (correctly): %v", err) + } + done <- true + }() + select { + case <-time.After(1 * time.Second): + t.Errorf("Copy hung") + case <-done: + // ok + } +} + +func TestInitialReset(t *testing.T) { + var r Reader + if err := r.Reset(bytes.NewReader(gunzipTests[1].gzip)); err != nil { + t.Error(err) + } + var buf bytes.Buffer + if _, err := io.Copy(&buf, &r); err != nil { + t.Error(err) + } + if s := buf.String(); s != gunzipTests[1].raw { + t.Errorf("got %q want %q", s, gunzipTests[1].raw) + } +} + +func TestMultistreamFalse(t *testing.T) { + // Find concatenation test. + var tt gunzipTest + for _, tt = range gunzipTests { + if strings.HasSuffix(tt.desc, " x2") { + goto Found + } + } + t.Fatal("cannot find hello.txt x2 in gunzip tests") + +Found: + br := bytes.NewReader(tt.gzip) + var r Reader + if err := r.Reset(br); err != nil { + t.Fatalf("first reset: %v", err) + } + + // Expect two streams with "hello world\n", then real EOF. + const hello = "hello world\n" + + r.Multistream(false) + data, err := ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != nil { + t.Fatalf("second reset: %v", err) + } + r.Multistream(false) + data, err = ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != io.EOF { + t.Fatalf("third reset: err=%v, want io.EOF", err) + } +} + +func TestWriteTo(t *testing.T) { + input := make([]byte, 100000) + n, err := rand.Read(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + compressed := &bytes.Buffer{} + // Do it twice to test MultiStream functionality + for i := 0; i < 2; i++ { + w, err := NewWriterLevel(compressed, -2) + if err != nil { + t.Fatal(err) + } + n, err = w.Write(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + w.Close() + } + input = append(input, input...) 
+ buf := compressed.Bytes() + + dec, err := NewReader(bytes.NewBuffer(buf)) + if err != nil { + t.Fatal(err) + } + // ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure. + readall, err := ioutil.ReadAll(ioutil.NopCloser(dec)) + if err != nil { + t.Fatal(err) + } + if len(readall) != len(input) { + t.Errorf("did not decompress everything, want %d, got %d", len(input), len(readall)) + } + if bytes.Compare(readall, input) != 0 { + t.Error("output did not match input") + } + + dec, err = NewReader(bytes.NewBuffer(buf)) + if err != nil { + t.Fatal(err) + } + wtbuf := &bytes.Buffer{} + written, err := dec.WriteTo(wtbuf) + if err != nil { + t.Fatal(err) + } + if written != int64(len(input)) { + t.Error("Returned length did not match, expected", len(input), "got", written) + } + if wtbuf.Len() != len(input) { + t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len()) + } + if bytes.Compare(wtbuf.Bytes(), input) != 0 { + t.Fatal("output did not match input") + } +} + +func TestNilStream(t *testing.T) { + // Go liberally interprets RFC 1952 section 2.2 to mean that a gzip file + // consist of zero or more members. Thus, we test that a nil stream is okay. + _, err := NewReader(bytes.NewReader(nil)) + if err != io.EOF { + t.Fatalf("NewReader(nil) on empty stream: got %v, want io.EOF", err) + } +} + +func TestTruncatedStreams(t *testing.T) { + const data = "\x1f\x8b\b\x04\x00\tn\x88\x00\xff\a\x00foo bar\xcbH\xcd\xc9\xc9\xd7Q(\xcf/\xcaI\x01\x04:r\xab\xff\f\x00\x00\x00" + + // Intentionally iterate starting with at least one byte in the stream. + for i := 1; i < len(data)-1; i++ { + r, err := NewReader(strings.NewReader(data[:i])) + if err != nil { + if err != io.ErrUnexpectedEOF { + t.Errorf("NewReader(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF) + } + continue + } + _, err = io.Copy(ioutil.Discard, r) + if ferr, ok := err.(*flate.ReadError); ok { + err = ferr.Err + } + if err != io.ErrUnexpectedEOF { + t.Errorf("io.Copy(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF) + } + } +} + +func BenchmarkGunzipCopy(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + r, err := NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipNoWriteTo(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) 
+ dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, ioutil.NopCloser(r)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipStdlib(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := oldgz.NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} +func TestTruncatedGunzip(t *testing.T) { + in := []byte(strings.Repeat("ASDFASDFASDFASDFASDF", 1000)) + var buf bytes.Buffer + enc := NewWriter(&buf) + _, err := enc.Write(in) + if err != nil { + t.Fatal(err) + } + enc.Close() + testdata := buf.Bytes() + for i := 5; i < len(testdata); i += 10 { + timer := time.NewTimer(time.Second) + done := make(chan struct{}) + fail := make(chan struct{}) + go func() { + r, err := NewReader(bytes.NewBuffer(testdata[:i])) + if err == nil { + b, err := ioutil.ReadAll(r) + if err == nil && !bytes.Equal(testdata[:i], b) { + close(fail) + } + } + close(done) + }() + select { + case <-timer.C: + t.Fatal("Timeout decoding") + case <-fail: + t.Fatal("No error, but mismatch") + case <-done: + timer.Stop() + } + } +} diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go new file mode 100644 index 00000000000..7da7ee7486e --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip.go @@ -0,0 +1,251 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "errors" + "fmt" + "hash/crc32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header // written at first call to Write, Flush, or Close + w io.Writer + level int + wroteHeader bool + compressor *flate.Writer + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + closed bool + buf [10]byte + err error +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. 
+// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write, Flush, or Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.init(w, level) + return z, nil +} + +func (z *Writer) init(w io.Writer, level int) { + compressor := z.compressor + if compressor != nil { + compressor.Reset(w) + } + *z = Writer{ + Header: Header{ + OS: 255, // unknown + }, + w: w, + level: level, + compressor: compressor, + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + z.init(w, z.level) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + le.PutUint16(z.buf[:2], uint16(len(b))) + _, err := z.w.Write(z.buf[:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. + needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[:1]) + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed. +func (z *Writer) Write(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + var n int + // Write the GZIP header lazily. 
+ if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + n, z.err = z.w.Write(z.buf[:10]) + if z.err != nil { + return n, z.err + } + if z.Extra != nil { + z.err = z.writeBytes(z.Extra) + if z.err != nil { + return n, z.err + } + } + if z.Name != "" { + z.err = z.writeString(z.Name) + if z.err != nil { + return n, z.err + } + } + if z.Comment != "" { + z.err = z.writeString(z.Comment) + if z.err != nil { + return n, z.err + } + } + if z.compressor == nil { + z.compressor, _ = flate.NewWriter(z.w, z.level) + } + } + z.size += uint32(len(p)) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p) + n, z.err = z.compressor.Write(p) + return n, z.err +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + le.PutUint32(z.buf[:4], z.digest) + le.PutUint32(z.buf[4:8], z.size) + _, z.err = z.w.Write(z.buf[:8]) + return z.err +} diff --git a/vendor/github.com/klauspost/compress/gzip/gzip_test.go b/vendor/github.com/klauspost/compress/gzip/gzip_test.go new file mode 100644 index 00000000000..b18bb54b045 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip_test.go @@ -0,0 +1,519 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "bufio" + "bytes" + oldgz "compress/gzip" + "io" + "io/ioutil" + "math/rand" + "testing" + "time" +) + +// TestEmpty tests that an empty payload still forms a valid GZIP stream. +func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + + if err := NewWriter(buf).Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if len(b) != 0 { + t.Fatalf("got %d bytes, want 0", len(b)) + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestRoundTrip tests that gzipping and then gunzipping is the identity +// function. 
+func TestRoundTrip(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + if _, err := w.Write([]byte("payload")); err != nil { + t.Fatalf("Write: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if string(b) != "payload" { + t.Fatalf("payload is %q, want %q", string(b), "payload") + } + if r.Comment != "comment" { + t.Fatalf("comment is %q, want %q", r.Comment, "comment") + } + if string(r.Extra) != "extra" { + t.Fatalf("extra is %q, want %q", r.Extra, "extra") + } + if r.ModTime.Unix() != 1e8 { + t.Fatalf("mtime is %d, want %d", r.ModTime.Unix(), uint32(1e8)) + } + if r.Name != "name" { + t.Fatalf("name is %q, want %q", r.Name, "name") + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestLatin1 tests the internal functions for converting to and from Latin-1. +func TestLatin1(t *testing.T) { + latin1 := []byte{0xc4, 'u', 0xdf, 'e', 'r', 'u', 'n', 'g', 0} + utf8 := "Äußerung" + z := Reader{r: bufio.NewReader(bytes.NewReader(latin1))} + s, err := z.readString() + if err != nil { + t.Fatalf("readString: %v", err) + } + if s != utf8 { + t.Fatalf("read latin-1: got %q, want %q", s, utf8) + } + + buf := bytes.NewBuffer(make([]byte, 0, len(latin1))) + c := Writer{w: buf} + if err = c.writeString(utf8); err != nil { + t.Fatalf("writeString: %v", err) + } + s = buf.String() + if s != string(latin1) { + t.Fatalf("write utf-8: got %q, want %q", s, string(latin1)) + } +} + +// TestLatin1RoundTrip tests that metadata that is representable in Latin-1 +// survives a round trip. 
+func TestLatin1RoundTrip(t *testing.T) { + testCases := []struct { + name string + ok bool + }{ + {"", true}, + {"ASCII is OK", true}, + {"unless it contains a NUL\x00", false}, + {"no matter where \x00 occurs", false}, + {"\x00\x00\x00", false}, + {"Látin-1 also passes (U+00E1)", true}, + {"but LĀtin Extended-A (U+0100) does not", false}, + {"neither does 日本語", false}, + {"invalid UTF-8 also \xffails", false}, + {"\x00 as does Látin-1 with NUL", false}, + } + for _, tc := range testCases { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Name = tc.name + err := w.Close() + if (err == nil) != tc.ok { + t.Errorf("Writer.Close: name = %q, err = %v", tc.name, err) + continue + } + if !tc.ok { + continue + } + + r, err := NewReader(buf) + if err != nil { + t.Errorf("NewReader: %v", err) + continue + } + _, err = ioutil.ReadAll(r) + if err != nil { + t.Errorf("ReadAll: %v", err) + continue + } + if r.Name != tc.name { + t.Errorf("name is %q, want %q", r.Name, tc.name) + continue + } + if err := r.Close(); err != nil { + t.Errorf("Reader.Close: %v", err) + continue + } + } +} + +func TestWriterFlush(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + + n0 := buf.Len() + if n0 != 0 { + t.Fatalf("buffer size = %d before writes; want 0", n0) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n1 := buf.Len() + if n1 == 0 { + t.Fatal("no data after first flush") + } + + w.Write([]byte("x")) + + n2 := buf.Len() + if n1 != n2 { + t.Fatalf("after writing a single byte, size changed from %d to %d; want no change", n1, n2) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n3 := buf.Len() + if n2 == n3 { + t.Fatal("Flush didn't flush any data") + } +} + +// Multiple gzip files concatenated form a valid gzip file. 
+func TestConcat(t *testing.T) { + var buf bytes.Buffer + w := NewWriter(&buf) + w.Write([]byte("hello ")) + w.Close() + w = NewWriter(&buf) + w.Write([]byte("world\n")) + w.Close() + + r, err := NewReader(&buf) + data, err := ioutil.ReadAll(r) + if string(data) != "hello world\n" || err != nil { + t.Fatalf("ReadAll = %q, %v, want %q, nil", data, err, "hello world") + } +} + +func TestWriterReset(t *testing.T) { + buf := new(bytes.Buffer) + buf2 := new(bytes.Buffer) + z := NewWriter(buf) + msg := []byte("hello world") + z.Write(msg) + z.Close() + z.Reset(buf2) + z.Write(msg) + z.Close() + if buf.String() != buf2.String() { + t.Errorf("buf2 %q != original buf of %q", buf2.String(), buf.String()) + } +} + +var testbuf []byte + +func testFile(i, level int, t *testing.T) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dl := len(dat) + if len(testbuf) != i*dl { + // Make results predictable + testbuf = make([]byte, i*dl) + for j := 0; j < i; j++ { + copy(testbuf[j*dl:j*dl+dl], dat) + } + } + + br := bytes.NewBuffer(testbuf) + var buf bytes.Buffer + w, err := NewWriterLevel(&buf, DefaultCompression) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(w, br) + if err != nil { + t.Fatal(err) + } + if int(n) != len(testbuf) { + t.Fatal("Short write:", n, "!=", testbuf) + } + err = w.Close() + if err != nil { + t.Fatal(err) + } + r, err := NewReader(&buf) + if err != nil { + t.Fatal(err.Error()) + } + decoded, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err.Error()) + } + if !bytes.Equal(testbuf, decoded) { + t.Errorf("decoded content does not match.") + } +} + +func TestFile1xM2(t *testing.T) { testFile(1, -2, t) } +func TestFile1xM1(t *testing.T) { testFile(1, -1, t) } +func TestFile1x0(t *testing.T) { testFile(1, 0, t) } +func TestFile1x1(t *testing.T) { testFile(1, 1, t) } +func TestFile1x2(t *testing.T) { testFile(1, 2, t) } +func TestFile1x3(t *testing.T) { testFile(1, 3, t) } +func TestFile1x4(t *testing.T) { testFile(1, 4, t) } +func TestFile1x5(t *testing.T) { testFile(1, 5, t) } +func TestFile1x6(t *testing.T) { testFile(1, 6, t) } +func TestFile1x7(t *testing.T) { testFile(1, 7, t) } +func TestFile1x8(t *testing.T) { testFile(1, 8, t) } +func TestFile1x9(t *testing.T) { testFile(1, 9, t) } +func TestFile10(t *testing.T) { testFile(10, DefaultCompression, t) } + +func TestFile50(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testFile(50, DefaultCompression, t) +} + +func TestFile200(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testFile(200, BestSpeed, t) +} + +func testBigGzip(i int, t *testing.T) { + if len(testbuf) != i { + // Make results predictable + rand.Seed(1337) + testbuf = make([]byte, i) + for idx := range testbuf { + testbuf[idx] = byte(65 + rand.Intn(20)) + } + } + c := BestCompression + if testing.Short() { + c = BestSpeed + } + + br := bytes.NewBuffer(testbuf) + var buf bytes.Buffer + w, err := NewWriterLevel(&buf, c) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(w, br) + if err != nil { + t.Fatal(err) + } + if int(n) != len(testbuf) { + t.Fatal("Short write:", n, "!=", len(testbuf)) + } + err = w.Close() + if err != nil { + t.Fatal(err.Error()) + } + + r, err := NewReader(&buf) + if err != nil { + t.Fatal(err.Error()) + } + decoded, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err.Error()) + } + if !bytes.Equal(testbuf, decoded) { + t.Errorf("decoded content does not match.") + } +} + +func TestGzip1K(t *testing.T) { testBigGzip(1000, t) } +func TestGzip100K(t 
*testing.T) { testBigGzip(100000, t) } +func TestGzip1M(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + + testBigGzip(1000000, t) +} +func TestGzip10M(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testBigGzip(10000000, t) +} + +// Test if two runs produce identical results. +func TestDeterministicLM2(t *testing.T) { testDeterm(-2, t) } + +// Level 0 is not deterministic since it depends on the size of each write. +// func TestDeterministicL0(t *testing.T) { testDeterm(0, t) } +func TestDeterministicL1(t *testing.T) { testDeterm(1, t) } +func TestDeterministicL2(t *testing.T) { testDeterm(2, t) } +func TestDeterministicL3(t *testing.T) { testDeterm(3, t) } +func TestDeterministicL4(t *testing.T) { testDeterm(4, t) } +func TestDeterministicL5(t *testing.T) { testDeterm(5, t) } +func TestDeterministicL6(t *testing.T) { testDeterm(6, t) } +func TestDeterministicL7(t *testing.T) { testDeterm(7, t) } +func TestDeterministicL8(t *testing.T) { testDeterm(8, t) } +func TestDeterministicL9(t *testing.T) { testDeterm(9, t) } + +func testDeterm(i int, t *testing.T) { + var length = 500000 + if testing.Short() { + length = 100000 + } + rand.Seed(1337) + t1 := make([]byte, length) + for idx := range t1 { + t1[idx] = byte(65 + rand.Intn(8)) + } + + br := bytes.NewBuffer(t1) + var b1 bytes.Buffer + w, err := NewWriterLevel(&b1, i) + if err != nil { + t.Fatal(err) + } + _, err = io.Copy(w, br) + if err != nil { + t.Fatal(err) + } + w.Flush() + w.Close() + + // We recreate the buffer, so we have a good chance of getting a + // different memory address. + rand.Seed(1337) + t2 := make([]byte, length) + for idx := range t2 { + t2[idx] = byte(65 + rand.Intn(8)) + } + + br2 := bytes.NewBuffer(t2) + var b2 bytes.Buffer + w2, err := NewWriterLevel(&b2, i) + if err != nil { + t.Fatal(err) + } + + // We write the same data, but with a different size than + // the default copy. + for { + _, err = io.CopyN(w2, br2, 1234) + if err == io.EOF { + err = nil + break + } else if err != nil { + break + } + } + if err != nil { + t.Fatal(err) + } + w2.Flush() + w2.Close() + + b1b := b1.Bytes() + b2b := b2.Bytes() + + if bytes.Compare(b1b, b2b) != 0 { + t.Fatalf("Level %d did not produce deterministic result, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b)) + } +} + +func BenchmarkGzipLM2(b *testing.B) { benchmarkGzipN(b, -2) } +func BenchmarkGzipL1(b *testing.B) { benchmarkGzipN(b, 1) } +func BenchmarkGzipL2(b *testing.B) { benchmarkGzipN(b, 2) } +func BenchmarkGzipL3(b *testing.B) { benchmarkGzipN(b, 3) } +func BenchmarkGzipL4(b *testing.B) { benchmarkGzipN(b, 4) } +func BenchmarkGzipL5(b *testing.B) { benchmarkGzipN(b, 5) } +func BenchmarkGzipL6(b *testing.B) { benchmarkGzipN(b, 6) } +func BenchmarkGzipL7(b *testing.B) { benchmarkGzipN(b, 7) } +func BenchmarkGzipL8(b *testing.B) { benchmarkGzipN(b, 8) } +func BenchmarkGzipL9(b *testing.B) { benchmarkGzipN(b, 9) } + +func benchmarkGzipN(b *testing.B, level int) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) 
+ b.SetBytes(int64(len(dat))) + w, _ := NewWriterLevel(ioutil.Discard, level) + b.ResetTimer() + for n := 0; n < b.N; n++ { + w.Reset(ioutil.Discard) + n, err := w.Write(dat) + if n != len(dat) { + panic("short write") + } + if err != nil { + panic(err) + } + err = w.Close() + if err != nil { + panic(err) + } + } +} + +func BenchmarkOldGzipL1(b *testing.B) { benchmarkOldGzipN(b, 1) } +func BenchmarkOldGzipL2(b *testing.B) { benchmarkOldGzipN(b, 2) } +func BenchmarkOldGzipL3(b *testing.B) { benchmarkOldGzipN(b, 3) } +func BenchmarkOldGzipL4(b *testing.B) { benchmarkOldGzipN(b, 4) } +func BenchmarkOldGzipL5(b *testing.B) { benchmarkOldGzipN(b, 5) } +func BenchmarkOldGzipL6(b *testing.B) { benchmarkOldGzipN(b, 6) } +func BenchmarkOldGzipL7(b *testing.B) { benchmarkOldGzipN(b, 7) } +func BenchmarkOldGzipL8(b *testing.B) { benchmarkOldGzipN(b, 8) } +func BenchmarkOldGzipL9(b *testing.B) { benchmarkOldGzipN(b, 9) } + +func benchmarkOldGzipN(b *testing.B, level int) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + + b.SetBytes(int64(len(dat))) + w, _ := oldgz.NewWriterLevel(ioutil.Discard, level) + b.ResetTimer() + for n := 0; n < b.N; n++ { + w.Reset(ioutil.Discard) + n, err := w.Write(dat) + if n != len(dat) { + panic("short write") + } + if err != nil { + panic(err) + } + err = w.Close() + if err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/klauspost/compress/gzip/testdata/issue6550.gz b/vendor/github.com/klauspost/compress/gzip/testdata/issue6550.gz new file mode 100644 index 00000000000..57972b63668 Binary files /dev/null and b/vendor/github.com/klauspost/compress/gzip/testdata/issue6550.gz differ diff --git a/vendor/github.com/klauspost/compress/gzip/testdata/test.json b/vendor/github.com/klauspost/compress/gzip/testdata/test.json new file mode 100644 index 00000000000..3b7b678b5e4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/testdata/test.json @@ -0,0 +1,5902 @@ +[ + { + "_id": "543fa821aeca0fed7f182f01", + "index": 0, + "guid": "3526d142-6d2b-4266-9855-e6ec1589a265", + "isActive": false, + "balance": "$2,156.72", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "brown", + "name": { + "first": "Rosella", + "last": "Hale" + }, + "company": "SKINSERVE", + "email": "rosella.hale@skinserve.net", + "phone": "+1 (920) 528-2959", + "address": "324 Imlay Street, Sehili, Guam, 3022", + "about": "Est consectetur ut incididunt commodo elit cillum incididunt consectetur id officia pariatur pariatur cillum. Ipsum non incididunt tempor non. Cillum aliquip aliquip non minim ipsum voluptate incididunt adipisicing aute pariatur laborum minim deserunt laborum. Do do consequat enim adipisicing dolor incididunt reprehenderit sint. Veniam dolor consequat sint ullamco id enim occaecat.\r\n", + "registered": "Wednesday, August 27, 2014 9:12 PM", + "latitude": 43.44586, + "longitude": -65.480986, + "tags": [ + "Lorem", + "ex", + "magna", + "aliqua", + "id", + "sint", + "elit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Etta Stanton" + }, + { + "id": 1, + "name": "Cora Velazquez" + }, + { + "id": 2, + "name": "Deann Guy" + } + ], + "greeting": "Hello, Rosella! 
You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8218066e8499ef38bcc", + "index": 1, + "guid": "991a35b5-91db-49e8-8a1e-13688b5ed88d", + "isActive": true, + "balance": "$1,762.71", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "green", + "name": { + "first": "Rose", + "last": "Lynn" + }, + "company": "NITRACYR", + "email": "rose.lynn@nitracyr.com", + "phone": "+1 (912) 564-2131", + "address": "485 Pulaski Street, Logan, Mississippi, 7453", + "about": "Minim proident enim eiusmod reprehenderit excepteur laboris. Adipisicing culpa cupidatat eiusmod exercitation reprehenderit anim. Nostrud mollit reprehenderit reprehenderit id magna et id esse cillum et proident. Incididunt eu nisi excepteur est est irure voluptate id nulla. Laboris consectetur aliqua cupidatat ex elit proident officia ex quis. Minim officia eu eiusmod velit. Ullamco dolor non quis aliqua cupidatat amet laborum laborum ad ex proident qui eiusmod ea.\r\n", + "registered": "Sunday, October 5, 2014 10:36 PM", + "latitude": -3.548698, + "longitude": 79.421107, + "tags": [ + "exercitation", + "adipisicing", + "aliqua", + "do", + "id", + "veniam", + "est" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Ada Little" + }, + { + "id": 1, + "name": "Lopez Osborne" + }, + { + "id": 2, + "name": "Tami Leach" + } + ], + "greeting": "Hello, Rose! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821255974bb9f89e5ea", + "index": 2, + "guid": "e5727238-63a4-4e1e-88cc-67300826259c", + "isActive": false, + "balance": "$2,131.97", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "green", + "name": { + "first": "Gloria", + "last": "Richards" + }, + "company": "SPHERIX", + "email": "gloria.richards@spherix.biz", + "phone": "+1 (884) 536-3434", + "address": "493 Judge Street, Cetronia, Rhode Island, 4439", + "about": "Lorem cupidatat ea et laboris tempor enim non. Sit consequat culpa et qui aute cillum ut ullamco. Nulla duis sit Lorem incididunt mollit nostrud dolor veniam ullamco. Sunt magna id velit in laborum nisi labore. Id deserunt labore dolore dolor aliqua culpa est id duis.\r\n", + "registered": "Saturday, March 29, 2014 8:18 AM", + "latitude": 60.328012, + "longitude": 126.657357, + "tags": [ + "dolore", + "laboris", + "proident", + "cillum", + "in", + "fugiat", + "incididunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bowen Cote" + }, + { + "id": 1, + "name": "Olga Gardner" + }, + { + "id": 2, + "name": "Evangeline Howard" + } + ], + "greeting": "Hello, Gloria! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8212b7e1e8201a38702", + "index": 3, + "guid": "bab757bd-2ebd-4c2c-86b7-0d4d8b059d35", + "isActive": true, + "balance": "$2,509.81", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Casey", + "last": "Hayes" + }, + "company": "SURELOGIC", + "email": "casey.hayes@surelogic.co.uk", + "phone": "+1 (993) 573-3937", + "address": "330 Tapscott Avenue, Eastvale, New Mexico, 928", + "about": "Eu elit sint sunt labore dolor cillum esse ad voluptate commodo. Dolor aliqua do dolore ex tempor sint consequat culpa et consectetur nisi voluptate reprehenderit. Dolor velit eu cillum tempor anim anim. Nostrud laboris eiusmod elit enim duis in consectetur esse anim qui. 
Et eiusmod culpa nulla anim et officia pariatur reprehenderit eiusmod veniam. Ullamco nisi ea incididunt velit. Ullamco cillum mollit ea aliqua ea eu et enim.\r\n", + "registered": "Sunday, September 14, 2014 8:35 AM", + "latitude": -43.494604, + "longitude": 95.217518, + "tags": [ + "officia", + "sunt", + "dolore", + "qui", + "elit", + "irure", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Serrano Wise" + }, + { + "id": 1, + "name": "Lorene Macias" + }, + { + "id": 2, + "name": "Kristen Lott" + } + ], + "greeting": "Hello, Casey! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821bfefa43403d5d054", + "index": 4, + "guid": "675d1598-8c45-4d67-a4df-d38a270de371", + "isActive": false, + "balance": "$3,887.07", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Price", + "last": "Oconnor" + }, + "company": "ANOCHA", + "email": "price.oconnor@anocha.tv", + "phone": "+1 (855) 410-3197", + "address": "447 Stockholm Street, Templeton, Wisconsin, 2216", + "about": "Cillum veniam esse duis tempor incididunt do dolor officia elit eu. Excepteur velit reprehenderit minim Lorem commodo est. Duis Lorem nisi elit aliquip est deserunt fugiat ut. Nisi tempor ex est pariatur laborum eiusmod anim eu nulla. Nisi enim id aute id ex id nostrud.\r\n", + "registered": "Wednesday, May 14, 2014 5:19 PM", + "latitude": 26.083477, + "longitude": 122.61114, + "tags": [ + "in", + "ad", + "aliqua", + "minim", + "nisi", + "cupidatat", + "id" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Montgomery Mccray" + }, + { + "id": 1, + "name": "Lucia Ferrell" + }, + { + "id": 2, + "name": "Glover Brock" + } + ], + "greeting": "Hello, Price! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a699260d8ed4439a", + "index": 5, + "guid": "5e271270-fef3-48a7-b389-346251b46abc", + "isActive": false, + "balance": "$1,046.50", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Rita", + "last": "Huber" + }, + "company": "VURBO", + "email": "rita.huber@vurbo.name", + "phone": "+1 (803) 589-3948", + "address": "838 River Street, Gadsden, American Samoa, 2602", + "about": "Culpa quis qui exercitation velit officia eu id qui consequat qui. Ea fugiat quis fugiat proident velit. Velit et reprehenderit quis irure adipisicing duis dolor id cupidatat ea aliqua elit.\r\n", + "registered": "Friday, April 11, 2014 11:56 AM", + "latitude": 30.717665, + "longitude": -29.687902, + "tags": [ + "veniam", + "ex", + "deserunt", + "cillum", + "sint", + "eu", + "proident" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Oliver Terrell" + }, + { + "id": 1, + "name": "Lora Shepherd" + }, + { + "id": 2, + "name": "Guzman Holman" + } + ], + "greeting": "Hello, Rita! 
You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821b888230e87ce950e", + "index": 6, + "guid": "7e4efd9a-4923-42ae-8924-d6d5fae80ec0", + "isActive": false, + "balance": "$1,205.59", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Peterson", + "last": "Oliver" + }, + "company": "ZENTIX", + "email": "peterson.oliver@zentix.me", + "phone": "+1 (924) 564-2815", + "address": "596 Middleton Street, Walker, Louisiana, 3358", + "about": "Exercitation cillum sit exercitation voluptate duis nostrud incididunt cillum sint minim labore tempor minim ad. Esse ad id pariatur cillum id exercitation ullamco elit. Quis nisi excepteur mollit consectetur id et. Ea voluptate nulla duis minim exercitation aliqua aute nisi enim enim excepteur dolor ad non. Aliquip elit eu enim officia minim enim Lorem tempor. Cillum anim aute sunt cupidatat deserunt consequat.\r\n", + "registered": "Friday, March 28, 2014 2:28 PM", + "latitude": 45.092029, + "longitude": 56.730029, + "tags": [ + "in", + "voluptate", + "sit", + "sit", + "Lorem", + "reprehenderit", + "esse" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Wiley Henry" + }, + { + "id": 1, + "name": "Downs Rowland" + }, + { + "id": 2, + "name": "White Guerra" + } + ], + "greeting": "Hello, Peterson! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210f8589ab846b3d88", + "index": 7, + "guid": "afc25e30-7e70-4a87-bdd3-519e1837969a", + "isActive": false, + "balance": "$3,928.85", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Shauna", + "last": "Morse" + }, + "company": "CENTICE", + "email": "shauna.morse@centice.us", + "phone": "+1 (926) 517-3679", + "address": "752 Dunne Place, Ebro, Kansas, 3215", + "about": "Cupidatat incididunt sit duis tempor labore dolore aute qui magna in. Consequat aute ut veniam laborum aliqua Lorem esse. Cillum in qui sint excepteur eiusmod eiusmod eu anim adipisicing et.\r\n", + "registered": "Saturday, September 6, 2014 2:32 PM", + "latitude": 36.341849, + "longitude": 108.378341, + "tags": [ + "in", + "nulla", + "labore", + "qui", + "id", + "enim", + "fugiat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lizzie Carson" + }, + { + "id": 1, + "name": "Eliza Hall" + }, + { + "id": 2, + "name": "Baxter Burton" + } + ], + "greeting": "Hello, Shauna! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8213c997bd81d4a7fa5", + "index": 8, + "guid": "1337ad27-17e3-459f-90a3-a43b54b88184", + "isActive": true, + "balance": "$2,096.67", + "picture": "http://placehold.it/32x32", + "age": 24, + "eyeColor": "blue", + "name": { + "first": "Glenn", + "last": "Brooks" + }, + "company": "MANGLO", + "email": "glenn.brooks@manglo.ca", + "phone": "+1 (895) 595-2669", + "address": "605 McDonald Avenue, Nicholson, Indiana, 2302", + "about": "Deserunt incididunt ullamco dolore nostrud cupidatat sit consequat adipisicing incididunt sunt. Laboris fugiat et laboris est eu laborum culpa. Labore ad aliquip ut enim aute nulla quis cillum dolor aliqua. Culpa labore occaecat et sunt qui. Velit consequat ad proident non voluptate non mollit eu et cillum tempor. 
Velit quis deserunt Lorem cupidatat enim ut.\r\n", + "registered": "Friday, March 21, 2014 9:06 AM", + "latitude": -40.51084, + "longitude": -137.771438, + "tags": [ + "enim", + "laboris", + "culpa", + "do", + "nulla", + "anim", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Shelly Cardenas" + }, + { + "id": 1, + "name": "Kristine Mendoza" + }, + { + "id": 2, + "name": "Hall Hendrix" + } + ], + "greeting": "Hello, Glenn! You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821054bcf388259b272", + "index": 9, + "guid": "cd9601fc-7ca7-4d54-830b-145ff5c5c147", + "isActive": true, + "balance": "$1,816.14", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Maribel", + "last": "Small" + }, + "company": "FUTURITY", + "email": "maribel.small@futurity.org", + "phone": "+1 (825) 532-2134", + "address": "424 Rockaway Parkway, Vale, Alaska, 1834", + "about": "Aliqua irure culpa exercitation nostrud qui exercitation deserunt ullamco culpa aliquip irure. Proident officia in consequat laborum ex adipisicing exercitation proident anim cupidatat excepteur anim. Labore irure pariatur laboris reprehenderit.\r\n", + "registered": "Tuesday, July 29, 2014 9:59 PM", + "latitude": 53.843872, + "longitude": 85.292318, + "tags": [ + "dolore", + "laborum", + "aute", + "aliqua", + "nostrud", + "commodo", + "commodo" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Anthony Bray" + }, + { + "id": 1, + "name": "Vicki Kelly" + }, + { + "id": 2, + "name": "Baird Wagner" + } + ], + "greeting": "Hello, Maribel! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821f14efb6f7f1210b9", + "index": 10, + "guid": "c9400d51-ea8d-4748-9a10-fa0e3a037b23", + "isActive": false, + "balance": "$1,395.15", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Kendra", + "last": "Knapp" + }, + "company": "UNCORP", + "email": "kendra.knapp@uncorp.biz", + "phone": "+1 (830) 509-3054", + "address": "765 Cameron Court, Ferney, Florida, 1963", + "about": "Duis irure ea qui ut velit nostrud. Lorem laborum excepteur do qui ad sit culpa. Labore mollit mollit deserunt sint aute officia qui laboris dolor aliqua magna in officia.\r\n", + "registered": "Sunday, April 27, 2014 11:55 AM", + "latitude": 84.200593, + "longitude": -155.377179, + "tags": [ + "laborum", + "labore", + "aliquip", + "ex", + "voluptate", + "dolor", + "Lorem" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Marva Cash" + }, + { + "id": 1, + "name": "Aimee Velez" + }, + { + "id": 2, + "name": "Eaton Delgado" + } + ], + "greeting": "Hello, Kendra! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82119105322ba52b407", + "index": 11, + "guid": "b6862230-95da-43d5-97ba-13ed4bcc0744", + "isActive": false, + "balance": "$1,442.51", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "brown", + "name": { + "first": "Katrina", + "last": "Ferguson" + }, + "company": "MANTRIX", + "email": "katrina.ferguson@mantrix.io", + "phone": "+1 (938) 541-3037", + "address": "447 Putnam Avenue, Collins, Georgia, 8421", + "about": "Minim aliquip Lorem fugiat et fugiat esse aliqua consectetur non officia esse. Fugiat irure eu ut irure cillum mollit nisi consequat do cillum. 
Est exercitation deserunt proident ex cupidatat. Elit aliquip pariatur ad minim adipisicing qui. Quis enim laborum incididunt eiusmod deserunt cillum amet enim. Proident et do voluptate esse laboris nisi. Duis cupidatat fugiat adipisicing aute velit et ullamco anim velit velit et excepteur laboris.\r\n", + "registered": "Tuesday, February 4, 2014 5:01 PM", + "latitude": 43.287084, + "longitude": 133.518964, + "tags": [ + "magna", + "officia", + "reprehenderit", + "excepteur", + "cillum", + "veniam", + "officia" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tameka Mccullough" + }, + { + "id": 1, + "name": "Madden Vincent" + }, + { + "id": 2, + "name": "Jewel Mccarthy" + } + ], + "greeting": "Hello, Katrina! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821ee55902ac207b17a", + "index": 12, + "guid": "50161207-4477-47b9-8aa7-a845c3c2e96f", + "isActive": false, + "balance": "$1,937.64", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Gilliam", + "last": "Flowers" + }, + "company": "INTERLOO", + "email": "gilliam.flowers@interloo.net", + "phone": "+1 (930) 564-2474", + "address": "986 Elton Street, Bagtown, Alabama, 9115", + "about": "Velit incididunt ut nulla adipisicing ad qui sint dolor cillum cupidatat in. Commodo aliqua deserunt ea eu irure irure nisi ullamco culpa nostrud. Adipisicing exercitation excepteur et id cupidatat. Ullamco ut incididunt proident est ad deserunt duis id ut. Excepteur cupidatat irure reprehenderit et excepteur minim cillum occaecat adipisicing. Commodo fugiat ad ex consectetur commodo dolore id nisi deserunt commodo aliquip. Veniam amet mollit nulla adipisicing eu minim sit magna incididunt adipisicing.\r\n", + "registered": "Tuesday, April 1, 2014 12:42 AM", + "latitude": -55.19047, + "longitude": 177.975351, + "tags": [ + "sint", + "pariatur", + "incididunt", + "exercitation", + "quis", + "ad", + "sint" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Patsy Hunter" + }, + { + "id": 1, + "name": "Cecilia Green" + }, + { + "id": 2, + "name": "Meyer Jones" + } + ], + "greeting": "Hello, Gilliam! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821658f3962ce822678", + "index": 13, + "guid": "7017674f-79b3-43ed-8832-2b787c51f59d", + "isActive": false, + "balance": "$2,291.31", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Roberts", + "last": "Floyd" + }, + "company": "OVOLO", + "email": "roberts.floyd@ovolo.com", + "phone": "+1 (935) 401-2916", + "address": "723 Amherst Street, Brady, District Of Columbia, 4241", + "about": "Occaecat incididunt eu do quis est. Est mollit incididunt sint aute sunt. Consectetur incididunt officia eu fugiat quis officia pariatur excepteur sint. In enim nostrud nisi culpa. Ex incididunt exercitation id voluptate.\r\n", + "registered": "Thursday, June 19, 2014 4:16 PM", + "latitude": 72.321258, + "longitude": 28.548926, + "tags": [ + "et", + "ipsum", + "anim", + "dolor", + "commodo", + "do", + "exercitation" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tracey Vasquez" + }, + { + "id": 1, + "name": "Castro Harrell" + }, + { + "id": 2, + "name": "Sanders Barr" + } + ], + "greeting": "Hello, Roberts! 
You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821f95ca5383b9eb53d", + "index": 14, + "guid": "a6532d9a-d291-4a51-92e7-33c50ceecc12", + "isActive": true, + "balance": "$3,310.31", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Miles", + "last": "Valdez" + }, + "company": "DENTREX", + "email": "miles.valdez@dentrex.biz", + "phone": "+1 (960) 513-3228", + "address": "726 Stillwell Place, Soham, Pennsylvania, 3510", + "about": "Sit labore ex commodo duis tempor labore officia et et est qui ullamco. Aute elit in labore laboris magna duis ipsum excepteur anim laboris ipsum magna magna non. Sint mollit eiusmod in est sint ipsum excepteur do anim cillum cillum.\r\n", + "registered": "Thursday, May 1, 2014 6:08 PM", + "latitude": 88.123309, + "longitude": -121.226418, + "tags": [ + "voluptate", + "sunt", + "anim", + "laboris", + "exercitation", + "deserunt", + "culpa" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bradford Horn" + }, + { + "id": 1, + "name": "Hanson Dillon" + }, + { + "id": 2, + "name": "Whitley Stanley" + } + ], + "greeting": "Hello, Miles! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821c52f80cda8ff36b3", + "index": 15, + "guid": "63820e33-a8c7-410e-afa9-32b7f7017a32", + "isActive": true, + "balance": "$2,616.09", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Floyd", + "last": "Barker" + }, + "company": "TALAE", + "email": "floyd.barker@talae.co.uk", + "phone": "+1 (843) 435-2898", + "address": "501 Sullivan Place, Cotopaxi, Nevada, 5498", + "about": "Non deserunt voluptate occaecat est mollit dolor aliqua. Qui elit aute qui aliquip ipsum et labore est aliquip pariatur. Sint deserunt tempor dolore excepteur elit est sint in est ex anim. Nostrud culpa amet eiusmod incididunt. Ea exercitation amet labore cillum culpa duis aute incididunt dolore sunt. Cillum velit laboris quis eiusmod fugiat consectetur sit fugiat irure labore.\r\n", + "registered": "Monday, March 10, 2014 7:48 AM", + "latitude": -86.397923, + "longitude": 171.646534, + "tags": [ + "dolore", + "sit", + "qui", + "id", + "aliquip", + "mollit", + "laborum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Eileen Daniels" + }, + { + "id": 1, + "name": "Genevieve Wood" + }, + { + "id": 2, + "name": "Carver Fields" + } + ], + "greeting": "Hello, Floyd! You have 6 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8219eb621c418384935", + "index": 16, + "guid": "ed67eac2-cfdf-4d28-a734-bc973cba8613", + "isActive": false, + "balance": "$1,917.58", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Carrillo", + "last": "Cox" + }, + "company": "ARCHITAX", + "email": "carrillo.cox@architax.tv", + "phone": "+1 (818) 444-3875", + "address": "309 Randolph Street, Avoca, Illinois, 770", + "about": "Labore consequat et nostrud officia ad. Sint ipsum ipsum sint laboris adipisicing minim voluptate aliqua proident est commodo nulla. Officia sint ipsum laborum aliquip adipisicing adipisicing ea et reprehenderit dolore. Et deserunt sint incididunt velit dolore voluptate deserunt anim nisi sit est officia fugiat. Velit dolore ea do enim veniam ut do. 
Duis adipisicing fugiat magna Lorem ullamco quis sint ut cupidatat laborum aute laboris sint aliqua.\r\n", + "registered": "Friday, January 17, 2014 12:19 AM", + "latitude": -31.228015, + "longitude": -82.248255, + "tags": [ + "occaecat", + "nostrud", + "ex", + "dolor", + "magna", + "minim", + "pariatur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Douglas Mayer" + }, + { + "id": 1, + "name": "Dorothy Riddle" + }, + { + "id": 2, + "name": "Melanie Thompson" + } + ], + "greeting": "Hello, Carrillo! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821e152869356625777", + "index": 17, + "guid": "1dc17af3-a194-42e8-add8-a73853f16da2", + "isActive": true, + "balance": "$1,703.08", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "green", + "name": { + "first": "Ana", + "last": "Reese" + }, + "company": "FORTEAN", + "email": "ana.reese@fortean.name", + "phone": "+1 (876) 419-2128", + "address": "451 Brevoort Place, Leola, Tennessee, 3725", + "about": "Consectetur officia irure proident nulla. Anim veniam mollit sit id aliqua. Do reprehenderit culpa magna magna aute est pariatur consequat ut occaecat cillum adipisicing consectetur. Sint qui pariatur id velit deserunt laborum. Minim consequat ut sunt qui. Ex occaecat tempor fugiat sit anim veniam incididunt mollit mollit. Non id anim cillum culpa tempor voluptate aute consequat proident reprehenderit.\r\n", + "registered": "Saturday, June 28, 2014 4:53 AM", + "latitude": 80.18306, + "longitude": 70.818006, + "tags": [ + "mollit", + "voluptate", + "est", + "magna", + "ad", + "duis", + "est" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mclaughlin Johns" + }, + { + "id": 1, + "name": "Leanne Hanson" + }, + { + "id": 2, + "name": "Isabel Leon" + } + ], + "greeting": "Hello, Ana! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82168ed66f19f510aea", + "index": 18, + "guid": "87be73dd-bf5e-48c4-8168-f8f76cda905d", + "isActive": true, + "balance": "$1,307.88", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Daugherty", + "last": "Ware" + }, + "company": "TYPHONICA", + "email": "daugherty.ware@typhonica.me", + "phone": "+1 (936) 470-3445", + "address": "919 Oriental Boulevard, Westboro, Iowa, 2587", + "about": "Occaecat in nisi et consequat. Laboris minim consequat qui proident id aute occaecat pariatur. Sint esse anim id ex voluptate fugiat culpa anim commodo incididunt.\r\n", + "registered": "Tuesday, March 4, 2014 11:53 PM", + "latitude": -15.007384, + "longitude": -86.496257, + "tags": [ + "consequat", + "nisi", + "duis", + "cupidatat", + "anim", + "eu", + "culpa" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Maddox Wells" + }, + { + "id": 1, + "name": "Maura May" + }, + { + "id": 2, + "name": "Terry Calhoun" + } + ], + "greeting": "Hello, Daugherty! 
You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8210abc44da895c5591", + "index": 19, + "guid": "32c4c5d0-b54c-4ea4-999a-f4fa517ac5ce", + "isActive": true, + "balance": "$2,706.98", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "green", + "name": { + "first": "Sonja", + "last": "Craft" + }, + "company": "KOZGENE", + "email": "sonja.craft@kozgene.us", + "phone": "+1 (808) 410-3427", + "address": "808 Lawn Court, Blodgett, Massachusetts, 560", + "about": "Eu occaecat reprehenderit ea ad ullamco ea sint cupidatat ex. Deserunt eu est veniam consectetur do anim in. Dolore minim veniam dolore elit sunt labore id eiusmod.\r\n", + "registered": "Sunday, August 31, 2014 12:09 AM", + "latitude": -47.101894, + "longitude": -130.294589, + "tags": [ + "sint", + "cillum", + "magna", + "sit", + "fugiat", + "nisi", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Loretta Mcgee" + }, + { + "id": 1, + "name": "Wilson Merritt" + }, + { + "id": 2, + "name": "Susanne Lloyd" + } + ], + "greeting": "Hello, Sonja! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82134480e13e931193a", + "index": 20, + "guid": "c4a7ded5-aa3e-411f-9956-f4b938ce93dc", + "isActive": false, + "balance": "$3,216.50", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "green", + "name": { + "first": "Powers", + "last": "Mathews" + }, + "company": "ANDRYX", + "email": "powers.mathews@andryx.ca", + "phone": "+1 (914) 559-2596", + "address": "545 Nolans Lane, Brenton, Arkansas, 7607", + "about": "In irure et tempor ad commodo culpa reprehenderit excepteur tempor ex. Exercitation eiusmod consequat anim incididunt veniam duis sunt velit sunt aliquip esse adipisicing do. Elit ea incididunt id amet mollit ea in ad ea cupidatat duis minim consectetur incididunt.\r\n", + "registered": "Friday, July 11, 2014 11:27 AM", + "latitude": 64.259332, + "longitude": 111.604942, + "tags": [ + "ut", + "ex", + "cillum", + "commodo", + "pariatur", + "ex", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Woodward Witt" + }, + { + "id": 1, + "name": "Hazel Mcfadden" + }, + { + "id": 2, + "name": "Desiree Mclean" + } + ], + "greeting": "Hello, Powers! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821117e0b8bd3f6e566", + "index": 21, + "guid": "fae9ba5c-948b-429c-b1e6-a0a6835f0694", + "isActive": false, + "balance": "$1,046.41", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "brown", + "name": { + "first": "Lola", + "last": "Roy" + }, + "company": "EURON", + "email": "lola.roy@euron.org", + "phone": "+1 (920) 413-2000", + "address": "237 Tabor Court, Berwind, Hawaii, 2276", + "about": "Aute voluptate proident occaecat exercitation aute proident ullamco veniam aute magna velit cupidatat. Occaecat dolor aliquip adipisicing dolor do elit eu elit laboris officia magna dolore. Velit tempor sit ad et occaecat nisi elit excepteur. 
Non velit sint deserunt culpa magna irure.\r\n", + "registered": "Thursday, February 27, 2014 1:20 AM", + "latitude": 12.90929, + "longitude": 68.693395, + "tags": [ + "ad", + "officia", + "non", + "aute", + "magna", + "minim", + "sint" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lee Tate" + }, + { + "id": 1, + "name": "Miranda Payne" + }, + { + "id": 2, + "name": "Jocelyn Cantrell" + } + ], + "greeting": "Hello, Lola! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8211cf627425d947100", + "index": 22, + "guid": "590c913b-2457-47a5-ad5c-8a5fc3c249f9", + "isActive": true, + "balance": "$2,144.16", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "green", + "name": { + "first": "Marci", + "last": "Mcpherson" + }, + "company": "VIAGREAT", + "email": "marci.mcpherson@viagreat.biz", + "phone": "+1 (916) 417-2166", + "address": "527 Kenilworth Place, Bartonsville, Puerto Rico, 4739", + "about": "Duis velit irure sit sit aliquip sit culpa velit labore velit ipsum amet. Pariatur labore ex et sunt proident ad minim. Aliquip qui adipisicing elit do sunt mollit irure adipisicing in labore cillum. Ut velit dolor cillum irure voluptate ad incididunt consequat cillum esse laborum consequat do.\r\n", + "registered": "Wednesday, August 27, 2014 6:55 AM", + "latitude": 23.135493, + "longitude": -133.213153, + "tags": [ + "ut", + "ex", + "deserunt", + "mollit", + "cillum", + "aliquip", + "excepteur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Latisha Ortiz" + }, + { + "id": 1, + "name": "Ila Marshall" + }, + { + "id": 2, + "name": "Kathie Strong" + } + ], + "greeting": "Hello, Marci! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82113ff45aaf64d6b75", + "index": 23, + "guid": "a074b7d2-92de-4728-9032-ac711cc8ca1b", + "isActive": true, + "balance": "$1,927.67", + "picture": "http://placehold.it/32x32", + "age": 26, + "eyeColor": "green", + "name": { + "first": "Lorie", + "last": "Haynes" + }, + "company": "CUBIX", + "email": "lorie.haynes@cubix.io", + "phone": "+1 (914) 479-2574", + "address": "209 Stoddard Place, Grahamtown, New Hampshire, 3422", + "about": "Commodo eu reprehenderit aute veniam occaecat eiusmod ex enim mollit elit. Officia fugiat proident cillum sint sint. In anim occaecat in dolore pariatur occaecat dolore eu duis sint veniam labore tempor id.\r\n", + "registered": "Thursday, June 19, 2014 3:03 AM", + "latitude": -80.694066, + "longitude": 98.315178, + "tags": [ + "proident", + "est", + "nulla", + "minim", + "aute", + "duis", + "ea" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Shana Jensen" + }, + { + "id": 1, + "name": "Audra Hays" + }, + { + "id": 2, + "name": "Shannon Stewart" + } + ], + "greeting": "Hello, Lorie! 
You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8216788a07fe633c5a6", + "index": 24, + "guid": "ff361134-2ce3-4cce-b043-d571e87a041d", + "isActive": false, + "balance": "$1,274.62", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "green", + "name": { + "first": "Parker", + "last": "Higgins" + }, + "company": "ISOSTREAM", + "email": "parker.higgins@isostream.net", + "phone": "+1 (965) 467-3975", + "address": "908 Division Place, Homeland, South Carolina, 2577", + "about": "Laborum minim consectetur ipsum incididunt cupidatat ex ad labore eu non est consequat. Tempor eiusmod commodo Lorem enim aliquip ad non sint ipsum culpa amet. Eu sit amet velit est sit cupidatat aliquip magna proident id veniam Lorem dolore. Eiusmod ex amet proident enim ipsum proident mollit adipisicing ut.\r\n", + "registered": "Monday, August 25, 2014 4:51 AM", + "latitude": -28.784274, + "longitude": -151.224185, + "tags": [ + "ea", + "cupidatat", + "do", + "culpa", + "ea", + "ullamco", + "nulla" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bridgette Tyler" + }, + { + "id": 1, + "name": "Harris Pollard" + }, + { + "id": 2, + "name": "Davenport Skinner" + } + ], + "greeting": "Hello, Parker! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821ff898d23056717a4", + "index": 25, + "guid": "f5e85a0d-f46e-427e-86a7-657eaaadb169", + "isActive": true, + "balance": "$3,413.58", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "brown", + "name": { + "first": "Rios", + "last": "Reilly" + }, + "company": "QUILTIGEN", + "email": "rios.reilly@quiltigen.com", + "phone": "+1 (982) 565-3930", + "address": "789 Beekman Place, Wiscon, Texas, 8745", + "about": "Consectetur qui do sint deserunt voluptate sunt dolor in officia aliquip. Eu irure sit veniam nostrud culpa laboris. Commodo nostrud cillum nulla nostrud.\r\n", + "registered": "Thursday, September 4, 2014 1:54 PM", + "latitude": 6.093115, + "longitude": 145.037939, + "tags": [ + "eu", + "consectetur", + "veniam", + "pariatur", + "laboris", + "ad", + "cupidatat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Sweet Conley" + }, + { + "id": 1, + "name": "Key Grant" + }, + { + "id": 2, + "name": "Guthrie Moss" + } + ], + "greeting": "Hello, Rios! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82125f7c765fcbb1743", + "index": 26, + "guid": "e5f12323-5c7c-4103-b20c-1a633845a28c", + "isActive": true, + "balance": "$1,645.61", + "picture": "http://placehold.it/32x32", + "age": 26, + "eyeColor": "green", + "name": { + "first": "Hurley", + "last": "Cooke" + }, + "company": "QUORDATE", + "email": "hurley.cooke@quordate.biz", + "phone": "+1 (841) 404-3894", + "address": "369 Denton Place, Curtice, South Dakota, 2613", + "about": "Nulla non in aliqua sit mollit pariatur do mollit. Ut pariatur ut velit minim. Fugiat deserunt velit duis consequat labore culpa voluptate sint voluptate consectetur officia voluptate et laborum. Et exercitation ut eu pariatur minim velit elit. 
Dolore amet officia ipsum voluptate occaecat eiusmod cupidatat do dolore consequat esse consectetur aliquip.\r\n", + "registered": "Sunday, April 13, 2014 11:42 AM", + "latitude": -78.463811, + "longitude": 36.580914, + "tags": [ + "deserunt", + "nisi", + "do", + "enim", + "nisi", + "qui", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Harvey Norman" + }, + { + "id": 1, + "name": "Porter Shannon" + }, + { + "id": 2, + "name": "Reyes Goodman" + } + ], + "greeting": "Hello, Hurley! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821f375b9cabd418303", + "index": 27, + "guid": "452fe001-7fff-4f69-9336-00e3e80e9792", + "isActive": true, + "balance": "$2,608.67", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Jill", + "last": "Blair" + }, + "company": "ORBALIX", + "email": "jill.blair@orbalix.co.uk", + "phone": "+1 (863) 519-2778", + "address": "680 Cleveland Street, Kohatk, Ohio, 1688", + "about": "Do labore sint cupidatat dolor. Mollit nulla voluptate nostrud tempor ad cillum in mollit officia reprehenderit duis commodo veniam ad. Adipisicing enim adipisicing consequat sint minim ut. Cupidatat non ullamco sunt mollit proident. Aliquip dolore dolor excepteur cupidatat. Consectetur duis adipisicing qui enim aute quis veniam deserunt occaecat. Duis elit exercitation ullamco voluptate aliqua.\r\n", + "registered": "Tuesday, September 30, 2014 11:23 PM", + "latitude": -33.279869, + "longitude": 6.221211, + "tags": [ + "nostrud", + "elit", + "adipisicing", + "esse", + "in", + "commodo", + "ea" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tania Flores" + }, + { + "id": 1, + "name": "Nina Blackburn" + }, + { + "id": 2, + "name": "Mathews Fischer" + } + ], + "greeting": "Hello, Jill! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821304372a99199671f", + "index": 28, + "guid": "d160ded3-911c-4ce2-a314-9ed9a8e6fa9b", + "isActive": true, + "balance": "$3,005.54", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "brown", + "name": { + "first": "Estela", + "last": "Dalton" + }, + "company": "POWERNET", + "email": "estela.dalton@powernet.tv", + "phone": "+1 (959) 527-2607", + "address": "820 Montauk Avenue, Whitmer, Maine, 3867", + "about": "Commodo est ullamco sit eu irure tempor veniam deserunt in aute cillum tempor. Occaecat velit et deserunt incididunt sint do eu consectetur enim ullamco consectetur esse ipsum pariatur. Tempor exercitation dolore tempor enim. Dolor esse est magna occaecat. Elit culpa sint non ea exercitation. Aliquip nostrud aliquip culpa Lorem cillum incididunt do sit sunt velit id. Proident sit proident est velit consequat cillum officia in et.\r\n", + "registered": "Sunday, April 13, 2014 5:53 AM", + "latitude": 32.713335, + "longitude": 174.505048, + "tags": [ + "aliquip", + "dolore", + "proident", + "pariatur", + "elit", + "cillum", + "id" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Gibson Durham" + }, + { + "id": 1, + "name": "Carolina Cooley" + }, + { + "id": 2, + "name": "Rosa Mcintyre" + } + ], + "greeting": "Hello, Estela! 
You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82187c6143c96c5ce1f", + "index": 29, + "guid": "f62ae8de-8905-4ac0-9b5c-ce12dad96a86", + "isActive": false, + "balance": "$1,859.49", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "brown", + "name": { + "first": "Martina", + "last": "Jacobson" + }, + "company": "CUIZINE", + "email": "martina.jacobson@cuizine.name", + "phone": "+1 (927) 493-2997", + "address": "234 Utica Avenue, Hinsdale, Vermont, 459", + "about": "Pariatur nulla ad sint tempor qui in id aliqua ex et ut. Qui occaecat quis veniam mollit officia duis ad ea. Est consectetur sit sint proident sit do. Id ut incididunt tempor id irure. Qui commodo cillum labore anim eiusmod exercitation ea qui nulla qui amet.\r\n", + "registered": "Sunday, February 9, 2014 3:54 AM", + "latitude": -36.70558, + "longitude": -140.397297, + "tags": [ + "voluptate", + "adipisicing", + "do", + "deserunt", + "aliquip", + "est", + "minim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Griffith Martinez" + }, + { + "id": 1, + "name": "Richard Chavez" + }, + { + "id": 2, + "name": "Mckinney Butler" + } + ], + "greeting": "Hello, Martina! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821392ebf56130b6eaa", + "index": 30, + "guid": "91c5f9c9-d3c7-435a-98cc-06e360c12e1d", + "isActive": true, + "balance": "$3,693.79", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "brown", + "name": { + "first": "Althea", + "last": "Valencia" + }, + "company": "BLANET", + "email": "althea.valencia@blanet.me", + "phone": "+1 (887) 501-3212", + "address": "911 Adams Street, Brambleton, Delaware, 5831", + "about": "Quis culpa exercitation dolor anim. Id labore ea aute aliqua. Dolor dolore sit duis anim cillum nostrud officia dolor sit. Laborum tempor dolore id consequat.\r\n", + "registered": "Saturday, January 11, 2014 6:27 PM", + "latitude": -39.101471, + "longitude": -22.991091, + "tags": [ + "eu", + "anim", + "elit", + "pariatur", + "cupidatat", + "cupidatat", + "enim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Dawson Foreman" + }, + { + "id": 1, + "name": "Sanford Meyer" + }, + { + "id": 2, + "name": "Ruth Barron" + } + ], + "greeting": "Hello, Althea! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82111b129b44454e349", + "index": 31, + "guid": "184ea3dd-af3f-400a-aa64-429b6cac091f", + "isActive": true, + "balance": "$1,351.78", + "picture": "http://placehold.it/32x32", + "age": 20, + "eyeColor": "blue", + "name": { + "first": "Morrison", + "last": "Chan" + }, + "company": "TALKALOT", + "email": "morrison.chan@talkalot.us", + "phone": "+1 (822) 448-3384", + "address": "599 Olive Street, Franklin, Palau, 3303", + "about": "Ex deserunt nulla velit dolore. Sunt sit ea irure incididunt aute sint do veniam. 
Sit ut ad ipsum est velit ea duis exercitation aliquip consectetur Lorem fugiat eu.\r\n", + "registered": "Sunday, February 16, 2014 2:04 PM", + "latitude": 61.099205, + "longitude": -37.736061, + "tags": [ + "eu", + "deserunt", + "pariatur", + "labore", + "reprehenderit", + "magna", + "consectetur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Norma Montoya" + }, + { + "id": 1, + "name": "Bailey Gillespie" + }, + { + "id": 2, + "name": "Candace Kent" + } + ], + "greeting": "Hello, Morrison! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821aec56cb0ce50b54b", + "index": 32, + "guid": "b211ab76-8674-4ed0-9a40-2087930468ad", + "isActive": false, + "balance": "$1,492.99", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "green", + "name": { + "first": "Walters", + "last": "Potts" + }, + "company": "FLUMBO", + "email": "walters.potts@flumbo.ca", + "phone": "+1 (871) 461-3958", + "address": "438 Highland Avenue, Elwood, Arizona, 9669", + "about": "Lorem do Lorem aliquip ipsum. Elit labore reprehenderit tempor do. Incididunt labore ad eu occaecat enim laborum irure elit nulla Lorem anim sit exercitation velit. Proident ullamco voluptate aute ex et aute mollit nostrud. Adipisicing labore sit irure amet dolore nostrud. Tempor nulla aliqua culpa commodo aliqua ut esse velit mollit ad. Aliqua nulla enim non nisi laboris sint aute duis proident qui officia.\r\n", + "registered": "Saturday, September 6, 2014 11:47 AM", + "latitude": 64.732922, + "longitude": -168.513014, + "tags": [ + "tempor", + "amet", + "dolore", + "proident", + "reprehenderit", + "non", + "tempor" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Freida Bailey" + }, + { + "id": 1, + "name": "Bernice Curry" + }, + { + "id": 2, + "name": "Ochoa Jefferson" + } + ], + "greeting": "Hello, Walters! You have 6 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82137be18ee6b852a45", + "index": 33, + "guid": "e8c67cca-c977-4668-9aff-46bfde1cd3de", + "isActive": true, + "balance": "$3,747.65", + "picture": "http://placehold.it/32x32", + "age": 24, + "eyeColor": "blue", + "name": { + "first": "Meredith", + "last": "Santana" + }, + "company": "GADTRON", + "email": "meredith.santana@gadtron.org", + "phone": "+1 (836) 438-3637", + "address": "999 Centre Street, Chaparrito, Colorado, 4540", + "about": "Magna nisi laboris sit quis duis anim et ullamco nostrud exercitation. Tempor enim nisi non culpa sit ex elit labore proident veniam dolore anim ex. Nostrud est qui do magna proident et. Nulla ea laboris incididunt elit labore id mollit reprehenderit. Amet in Lorem exercitation tempor voluptate labore anim adipisicing labore in dolor proident labore. Lorem labore duis ex Lorem nulla. Veniam in fugiat ex ullamco officia elit eiusmod enim.\r\n", + "registered": "Sunday, March 2, 2014 1:38 PM", + "latitude": 80.220732, + "longitude": 79.102966, + "tags": [ + "culpa", + "esse", + "velit", + "consectetur", + "incididunt", + "dolore", + "aliqua" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bernadine Manning" + }, + { + "id": 1, + "name": "Daphne Wyatt" + }, + { + "id": 2, + "name": "Keri Harrison" + } + ], + "greeting": "Hello, Meredith! 
You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821ecdd700bb0b77dc2", + "index": 34, + "guid": "b93f3a6f-f05b-4d7e-93b8-4502d9c76cd2", + "isActive": true, + "balance": "$3,059.56", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "brown", + "name": { + "first": "Samantha", + "last": "Finley" + }, + "company": "XYQAG", + "email": "samantha.finley@xyqag.biz", + "phone": "+1 (879) 568-2419", + "address": "349 Truxton Street, Haring, Missouri, 8383", + "about": "Consequat do id et quis eiusmod eu irure sunt qui. Mollit minim nulla magna duis nostrud cillum ullamco sunt adipisicing elit ex. Minim fugiat deserunt nostrud esse laboris ullamco sit sit magna. Tempor occaecat Lorem qui ad ut tempor excepteur. Et sunt ullamco officia et Lorem est. Ipsum dolor ut ut elit do nisi in aute consequat. Enim esse ex aliqua anim aliquip cupidatat do Lorem voluptate quis ea culpa incididunt reprehenderit.\r\n", + "registered": "Saturday, March 29, 2014 1:38 PM", + "latitude": 79.209401, + "longitude": -139.211605, + "tags": [ + "irure", + "eiusmod", + "nulla", + "officia", + "eu", + "elit", + "nisi" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mayer Justice" + }, + { + "id": 1, + "name": "Mae Hancock" + }, + { + "id": 2, + "name": "Sherri Bradshaw" + } + ], + "greeting": "Hello, Samantha! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8218ac5365ad300d1bd", + "index": 35, + "guid": "f3b50cb9-da35-47fe-b0fa-fec9fc84cf8e", + "isActive": false, + "balance": "$2,819.28", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "blue", + "name": { + "first": "Cohen", + "last": "Finch" + }, + "company": "SYNTAC", + "email": "cohen.finch@syntac.io", + "phone": "+1 (950) 459-2729", + "address": "436 Pineapple Street, Deercroft, Minnesota, 3218", + "about": "Sint velit officia quis esse. Nulla aute laborum veniam dolore tempor adipisicing proident. Duis irure esse nostrud veniam est mollit mollit voluptate eiusmod anim veniam eiusmod. Ullamco sunt sit sint minim ea reprehenderit qui consequat ipsum. Sint id voluptate reprehenderit irure nulla veniam eu Lorem enim nulla. Cupidatat amet pariatur dolor amet ex nostrud dolor ipsum tempor enim nulla aliquip tempor Lorem.\r\n", + "registered": "Wednesday, October 8, 2014 10:04 PM", + "latitude": -84.299718, + "longitude": 52.573184, + "tags": [ + "deserunt", + "eu", + "consectetur", + "ea", + "non", + "officia", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Stevenson Mcintosh" + }, + { + "id": 1, + "name": "Chrystal Oneill" + }, + { + "id": 2, + "name": "Janine Rowe" + } + ], + "greeting": "Hello, Cohen! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82161d5c66d4bd0996e", + "index": 36, + "guid": "6dfc2232-f3c6-4818-956a-e8c683fd69fe", + "isActive": true, + "balance": "$3,262.20", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "blue", + "name": { + "first": "Joy", + "last": "Moran" + }, + "company": "UPDAT", + "email": "joy.moran@updat.net", + "phone": "+1 (968) 581-3365", + "address": "279 Kosciusko Street, Smock, Northern Mariana Islands, 5007", + "about": "Quis elit pariatur eu enim magna magna sunt dolore duis commodo. Pariatur sint duis ex aute eu est deserunt culpa fugiat minim non. Tempor consequat consectetur consequat sit incididunt officia id. 
Incididunt ex eiusmod excepteur aute mollit veniam quis excepteur occaecat excepteur deserunt reprehenderit. Est sit laboris eu dolor. Sunt voluptate quis aliquip nulla ex irure velit in. Aliqua sit id eiusmod amet commodo pariatur deserunt voluptate qui minim ex incididunt voluptate.\r\n", + "registered": "Saturday, February 8, 2014 9:55 PM", + "latitude": 53.735731, + "longitude": 46.00211, + "tags": [ + "exercitation", + "dolor", + "minim", + "incididunt", + "laborum", + "qui", + "sit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Joyce Mckay" + }, + { + "id": 1, + "name": "Turner Murray" + }, + { + "id": 2, + "name": "Jackson Jackson" + } + ], + "greeting": "Hello, Joy! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a9817ca1a9be6517", + "index": 37, + "guid": "84fe8707-cfbc-4434-98bb-faf9cb97471a", + "isActive": true, + "balance": "$3,224.80", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "blue", + "name": { + "first": "Brennan", + "last": "Stafford" + }, + "company": "LOVEPAD", + "email": "brennan.stafford@lovepad.com", + "phone": "+1 (873) 405-3600", + "address": "471 Ryder Avenue, Succasunna, Marshall Islands, 2595", + "about": "Et officia quis magna laborum et proident labore elit do Lorem reprehenderit irure. Laborum culpa culpa voluptate commodo consequat non et amet. Mollit cupidatat irure magna sint commodo ipsum proident tempor est. Laboris exercitation aliqua aute deserunt do in aliqua minim ex excepteur. Consequat in minim officia labore laboris laboris occaecat occaecat. Ex qui aliquip sint consectetur elit excepteur incididunt eu non laborum do eu excepteur.\r\n", + "registered": "Saturday, May 31, 2014 3:28 PM", + "latitude": -69.429728, + "longitude": 46.837644, + "tags": [ + "proident", + "ad", + "non", + "duis", + "occaecat", + "proident", + "non" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Jones Kirk" + }, + { + "id": 1, + "name": "Howe Drake" + }, + { + "id": 2, + "name": "Kimberly Jennings" + } + ], + "greeting": "Hello, Brennan! You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8219251dc49e1cb846a", + "index": 38, + "guid": "c260bce1-46ac-4e84-9490-13eb8202904e", + "isActive": true, + "balance": "$1,330.69", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "brown", + "name": { + "first": "Neal", + "last": "Mooney" + }, + "company": "SOFTMICRO", + "email": "neal.mooney@softmicro.biz", + "phone": "+1 (883) 463-3623", + "address": "818 Lancaster Avenue, Chelsea, New Jersey, 3590", + "about": "Reprehenderit anim nostrud adipisicing non minim ea. Elit deserunt id in mollit nisi. Pariatur in consequat irure aliqua laboris ipsum.\r\n", + "registered": "Wednesday, May 21, 2014 4:33 PM", + "latitude": -57.826881, + "longitude": 154.840249, + "tags": [ + "consequat", + "aliquip", + "pariatur", + "nulla", + "dolore", + "deserunt", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Beach Roman" + }, + { + "id": 1, + "name": "Nash Young" + }, + { + "id": 2, + "name": "Carey Dale" + } + ], + "greeting": "Hello, Neal! 
You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821cedc96e4f2209487", + "index": 39, + "guid": "ed7bbe27-811c-44e4-896e-cdf6ef62e048", + "isActive": false, + "balance": "$3,148.21", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "green", + "name": { + "first": "Roy", + "last": "Becker" + }, + "company": "KONGENE", + "email": "roy.becker@kongene.co.uk", + "phone": "+1 (895) 426-2172", + "address": "414 Seagate Avenue, Watrous, Virgin Islands, 3905", + "about": "Commodo mollit do minim dolor magna occaecat labore Lorem eiusmod. Occaecat mollit occaecat ex anim est amet irure non minim. Tempor laborum cupidatat tempor ex Lorem cupidatat incididunt ullamco fugiat Lorem consequat labore Lorem non.\r\n", + "registered": "Sunday, August 17, 2014 3:16 PM", + "latitude": -2.609533, + "longitude": -143.844769, + "tags": [ + "do", + "culpa", + "sint", + "ea", + "duis", + "aliqua", + "anim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lowery Hull" + }, + { + "id": 1, + "name": "Abbott Oneal" + }, + { + "id": 2, + "name": "Nellie Hammond" + } + ], + "greeting": "Hello, Roy! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821668031e7f50e30e9", + "index": 40, + "guid": "14dc3a87-0acf-4edd-93e4-ddcbeeecf96b", + "isActive": false, + "balance": "$2,617.16", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "brown", + "name": { + "first": "Courtney", + "last": "Watson" + }, + "company": "ISOSWITCH", + "email": "courtney.watson@isoswitch.tv", + "phone": "+1 (882) 468-2163", + "address": "385 Douglass Street, Iberia, Oregon, 7802", + "about": "Culpa sunt amet eu magna id quis quis irure velit. Culpa nostrud do enim proident officia. Laboris laborum laborum esse irure proident laborum amet sunt ipsum dolor nulla non ipsum sint. Amet deserunt in esse aliquip laboris proident fugiat nisi cillum ullamco occaecat est. Reprehenderit laborum enim labore ex. Velit do adipisicing irure dolor pariatur duis magna velit. Laborum sint laborum eu anim aliquip adipisicing labore.\r\n", + "registered": "Tuesday, July 22, 2014 6:03 PM", + "latitude": -75.831312, + "longitude": -172.468604, + "tags": [ + "dolor", + "velit", + "et", + "id", + "cupidatat", + "exercitation", + "laborum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mccray Gomez" + }, + { + "id": 1, + "name": "Hoover Rasmussen" + }, + { + "id": 2, + "name": "Hillary Castillo" + } + ], + "greeting": "Hello, Courtney! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8213c480453670f0428", + "index": 41, + "guid": "e7b97ea6-a13f-42ab-a998-e48614000aca", + "isActive": true, + "balance": "$1,499.72", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Dale", + "last": "Love" + }, + "company": "BULLJUICE", + "email": "dale.love@bulljuice.name", + "phone": "+1 (933) 588-3310", + "address": "603 Myrtle Avenue, Allentown, Utah, 793", + "about": "Ad quis anim commodo nulla et anim minim commodo irure excepteur. Pariatur ut anim aliquip id ex ipsum exercitation irure qui in nisi quis. Cillum deserunt duis dolore quis nostrud incididunt ipsum ea ipsum id fugiat eu voluptate nisi. Exercitation laborum fugiat irure anim. 
Ex nostrud aliqua deserunt amet.\r\n", + "registered": "Tuesday, July 22, 2014 1:19 PM", + "latitude": 55.335017, + "longitude": 101.730023, + "tags": [ + "ea", + "non", + "fugiat", + "Lorem", + "tempor", + "ut", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Spears Diaz" + }, + { + "id": 1, + "name": "Nora Dominguez" + }, + { + "id": 2, + "name": "Tamra Paul" + } + ], + "greeting": "Hello, Dale! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821c82bf70ca08206a9", + "index": 42, + "guid": "4367142d-e2b7-475c-b430-ac617295fbfc", + "isActive": false, + "balance": "$2,698.29", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "blue", + "name": { + "first": "Gibbs", + "last": "Hunt" + }, + "company": "COWTOWN", + "email": "gibbs.hunt@cowtown.me", + "phone": "+1 (960) 424-3404", + "address": "758 Suydam Place, Adamstown, North Carolina, 2800", + "about": "Veniam qui incididunt officia amet commodo nostrud. Magna consectetur consectetur officia Lorem amet sit officia excepteur minim consectetur pariatur dolore. Mollit fugiat aliqua consectetur qui non elit in aliquip culpa Lorem consectetur velit ad.\r\n", + "registered": "Monday, February 10, 2014 4:10 PM", + "latitude": 62.263652, + "longitude": 64.978136, + "tags": [ + "et", + "dolor", + "aute", + "minim", + "sunt", + "veniam", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Henry Schmidt" + }, + { + "id": 1, + "name": "Herman Wynn" + }, + { + "id": 2, + "name": "Wilda Grimes" + } + ], + "greeting": "Hello, Gibbs! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210b1096c8a2f821c4", + "index": 43, + "guid": "5184508c-47f7-48b5-85ac-d15ed747ed07", + "isActive": true, + "balance": "$3,460.45", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Francis", + "last": "Barton" + }, + "company": "COMTRAIL", + "email": "francis.barton@comtrail.us", + "phone": "+1 (881) 419-2936", + "address": "562 Ferris Street, Ola, Maryland, 3838", + "about": "Duis minim laboris in reprehenderit id ut laborum esse consequat. In dolore sunt consequat non fugiat do duis duis. Officia ipsum eiusmod laboris do aliqua aute velit minim nulla nisi. Dolor incididunt enim est eu cupidatat. Dolor commodo sit consectetur irure aliqua ea enim esse reprehenderit ullamco.\r\n", + "registered": "Thursday, April 10, 2014 2:48 PM", + "latitude": -35.457713, + "longitude": 141.805123, + "tags": [ + "tempor", + "officia", + "quis", + "tempor", + "ex", + "mollit", + "amet" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mayra Walters" + }, + { + "id": 1, + "name": "Casey Gross" + }, + { + "id": 2, + "name": "Aisha Santos" + } + ], + "greeting": "Hello, Francis! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821890ad48b2eaabcfb", + "index": 44, + "guid": "07497907-7e6f-471d-af9d-1dcbcf056a56", + "isActive": true, + "balance": "$2,166.79", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "blue", + "name": { + "first": "Dina", + "last": "Travis" + }, + "company": "VENOFLEX", + "email": "dina.travis@venoflex.ca", + "phone": "+1 (921) 485-3865", + "address": "366 Tillary Street, Century, New York, 6335", + "about": "Dolor sunt culpa enim sint officia sint id do ut anim ex in. 
Dolore est irure aliquip nulla laborum aliqua tempor id ea mollit in ad deserunt. Qui ullamco duis qui elit excepteur. Aute proident duis veniam enim commodo non minim id. Consequat anim eiusmod consectetur ut et labore officia ex ad cillum occaecat. Sit irure officia veniam sint et consequat reprehenderit officia qui. Aute esse nulla ad quis reprehenderit duis.\r\n", + "registered": "Friday, September 12, 2014 5:53 AM", + "latitude": 24.764352, + "longitude": 148.493552, + "tags": [ + "qui", + "officia", + "dolor", + "velit", + "ex", + "veniam", + "ullamco" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Sharpe Foster" + }, + { + "id": 1, + "name": "Kathrine Ayers" + }, + { + "id": 2, + "name": "Cox Acosta" + } + ], + "greeting": "Hello, Dina! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82101996fc944f7f215", + "index": 45, + "guid": "46b331f8-78d3-403f-8c93-cbaac9438998", + "isActive": false, + "balance": "$2,193.84", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "blue", + "name": { + "first": "Haney", + "last": "Garrett" + }, + "company": "EZENTIA", + "email": "haney.garrett@ezentia.org", + "phone": "+1 (965) 596-2629", + "address": "162 Village Road, Southmont, Virginia, 6673", + "about": "Laboris do veniam exercitation officia id eu minim irure Lorem laborum. Sint magna aliquip ad elit eiusmod cillum laborum. Adipisicing aliquip nulla mollit ipsum cupidatat nisi duis irure ullamco. Et elit nulla nisi culpa mollit esse in. Dolore non nulla anim magna enim aliquip quis amet non tempor incididunt dolor.\r\n", + "registered": "Monday, August 18, 2014 3:42 PM", + "latitude": 6.549909, + "longitude": 32.226887, + "tags": [ + "laboris", + "duis", + "culpa", + "qui", + "dolor", + "esse", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mariana Sharp" + }, + { + "id": 1, + "name": "Sue Baldwin" + }, + { + "id": 2, + "name": "Ross Arnold" + } + ], + "greeting": "Hello, Haney! You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821453cc33bd1abd21d", + "index": 46, + "guid": "313f0c45-6eaf-46d9-a28d-ca31e79d2c1c", + "isActive": false, + "balance": "$2,303.36", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "blue", + "name": { + "first": "Gale", + "last": "Robles" + }, + "company": "NEWCUBE", + "email": "gale.robles@newcube.biz", + "phone": "+1 (996) 558-2811", + "address": "597 Java Street, Sanborn, North Dakota, 1789", + "about": "Anim fugiat do nisi dolor sunt consequat irure quis laborum. Nisi cupidatat dolore excepteur irure ea minim proident excepteur exercitation ut voluptate deserunt. Ad do amet id voluptate enim commodo ex. Sunt sint quis sint aute do ea aliqua. Enim ullamco dolore proident qui mollit irure consequat. Nostrud sunt adipisicing elit incididunt do laboris ad officia ea amet id reprehenderit nulla.\r\n", + "registered": "Tuesday, July 15, 2014 2:25 PM", + "latitude": -21.549196, + "longitude": -97.373962, + "tags": [ + "ullamco", + "amet", + "sint", + "elit", + "tempor", + "ex", + "pariatur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bell Gould" + }, + { + "id": 1, + "name": "Denise Kirby" + }, + { + "id": 2, + "name": "Hess Hinton" + } + ], + "greeting": "Hello, Gale! 
You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821ed643a410e2d79dc", + "index": 47, + "guid": "2620cb2b-df00-4303-a5ff-768ffb1697c5", + "isActive": true, + "balance": "$2,890.73", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "blue", + "name": { + "first": "Alisha", + "last": "Hamilton" + }, + "company": "FITCORE", + "email": "alisha.hamilton@fitcore.io", + "phone": "+1 (853) 468-3192", + "address": "257 Bayard Street, Coldiron, Oklahoma, 9228", + "about": "Sunt nostrud sunt magna amet excepteur est tempor veniam aliqua. Laboris id aliquip fugiat exercitation dolore veniam et anim duis sit esse ex elit ullamco. Lorem commodo exercitation in sit cillum ipsum do dolor.\r\n", + "registered": "Tuesday, March 11, 2014 12:29 PM", + "latitude": 85.840017, + "longitude": -57.093095, + "tags": [ + "consectetur", + "commodo", + "est", + "ut", + "incididunt", + "elit", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Steele Ellis" + }, + { + "id": 1, + "name": "Serena Emerson" + }, + { + "id": 2, + "name": "Betty Langley" + } + ], + "greeting": "Hello, Alisha! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8215c6db03112e3e13d", + "index": 48, + "guid": "28b59ead-0fd8-480b-8eb8-2d75290934f6", + "isActive": false, + "balance": "$3,497.68", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "blue", + "name": { + "first": "Kaufman", + "last": "Williams" + }, + "company": "COMBOGENE", + "email": "kaufman.williams@combogene.net", + "phone": "+1 (820) 465-3213", + "address": "353 Clarendon Road, Wilmington, Michigan, 3811", + "about": "Irure id sint elit mollit occaecat occaecat veniam elit reprehenderit esse officia cillum. Aute aute occaecat ipsum commodo laborum adipisicing fugiat aliquip dolore. Deserunt id excepteur enim eu adipisicing nulla ut non est dolore est. Culpa magna et sit et non ex.\r\n", + "registered": "Sunday, September 7, 2014 5:57 AM", + "latitude": 10.667631, + "longitude": 157.707911, + "tags": [ + "consequat", + "dolor", + "deserunt", + "amet", + "Lorem", + "aliqua", + "minim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Horn Franco" + }, + { + "id": 1, + "name": "Kathleen Hickman" + }, + { + "id": 2, + "name": "Rosario Scott" + } + ], + "greeting": "Hello, Kaufman! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821045c82593b79538d", + "index": 49, + "guid": "cef964ed-4208-41eb-a8d8-916d95d18f8a", + "isActive": true, + "balance": "$3,808.37", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "blue", + "name": { + "first": "Tran", + "last": "Gallegos" + }, + "company": "CONCILITY", + "email": "tran.gallegos@concility.com", + "phone": "+1 (925) 487-2143", + "address": "969 Schermerhorn Street, Watchtower, Idaho, 413", + "about": "Velit ut enim cupidatat adipisicing culpa in non incididunt exercitation dolor pariatur. Deserunt dolor occaecat dolor officia ipsum occaecat tempor nisi. Culpa et culpa aute incididunt et labore sunt cillum nulla. Reprehenderit culpa enim laborum nostrud consectetur velit nulla consequat aliqua non exercitation sunt nulla aliqua. Labore incididunt aliqua aliqua anim duis culpa elit labore. 
Aliqua ipsum mollit ut sint aliquip in aute do qui amet.\r\n", + "registered": "Friday, May 9, 2014 9:24 AM", + "latitude": -49.148583, + "longitude": 4.911715, + "tags": [ + "labore", + "duis", + "proident", + "adipisicing", + "nisi", + "tempor", + "nisi" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nicole Holloway" + }, + { + "id": 1, + "name": "Minerva Beasley" + }, + { + "id": 2, + "name": "Bowers Suarez" + } + ], + "greeting": "Hello, Tran! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821fbd8e9836595f96d", + "index": 50, + "guid": "966b2f5e-5686-4381-9ab7-87bc9ac9968b", + "isActive": true, + "balance": "$1,351.67", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "green", + "name": { + "first": "Mable", + "last": "Lambert" + }, + "company": "ZENSUS", + "email": "mable.lambert@zensus.biz", + "phone": "+1 (917) 404-3441", + "address": "174 Concord Street, Somerset, Nebraska, 7318", + "about": "Ad minim esse ipsum incididunt incididunt enim Lorem nulla excepteur velit fugiat ullamco amet reprehenderit. Labore velit fugiat proident enim Lorem mollit ex. Tempor voluptate cupidatat officia nostrud qui. Veniam duis voluptate deserunt commodo consectetur eiusmod qui excepteur aliquip. Exercitation laborum Lorem excepteur ipsum aliqua fugiat reprehenderit ea dolore deserunt commodo cillum ipsum eu. Ullamco quis voluptate eiusmod ea aute et pariatur consequat duis occaecat nulla aliquip.\r\n", + "registered": "Sunday, May 25, 2014 4:54 PM", + "latitude": 79.811908, + "longitude": -62.629133, + "tags": [ + "sit", + "non", + "reprehenderit", + "exercitation", + "dolor", + "labore", + "irure" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Rosie Gill" + }, + { + "id": 1, + "name": "Parrish Dean" + }, + { + "id": 2, + "name": "Annmarie Delacruz" + } + ], + "greeting": "Hello, Mable! You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821c85822cf149b785f", + "index": 51, + "guid": "1afd0e8f-b518-4bca-a4df-29e9b6a961d6", + "isActive": true, + "balance": "$2,628.22", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "brown", + "name": { + "first": "Sadie", + "last": "Clarke" + }, + "company": "UNISURE", + "email": "sadie.clarke@unisure.co.uk", + "phone": "+1 (905) 480-2930", + "address": "326 Gilmore Court, Hamilton, Montana, 9217", + "about": "Dolore do nisi reprehenderit consectetur in. In esse sit proident enim duis veniam quis laboris nulla cillum adipisicing veniam aute. Culpa sunt ex exercitation sit esse exercitation dolor ea enim ad est aute consequat qui. Esse esse nulla eiusmod eiusmod ullamco esse cillum aute ea id ex quis. Pariatur officia Lorem aute officia anim velit velit elit sint voluptate. Aliquip ad velit velit laboris et culpa. Do consectetur aliqua sit sunt eu anim culpa ut incididunt et.\r\n", + "registered": "Sunday, April 27, 2014 10:37 AM", + "latitude": -71.828101, + "longitude": -138.908359, + "tags": [ + "nostrud", + "amet", + "minim", + "occaecat", + "proident", + "sint", + "nostrud" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Christa Estes" + }, + { + "id": 1, + "name": "Alana Schneider" + }, + { + "id": 2, + "name": "Frank Spears" + } + ], + "greeting": "Hello, Sadie! 
You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821aa6b44e8d20db81c", + "index": 52, + "guid": "e0220e02-565c-424a-8834-f955ccb72f7d", + "isActive": false, + "balance": "$2,918.63", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "brown", + "name": { + "first": "Deana", + "last": "Fletcher" + }, + "company": "VISALIA", + "email": "deana.fletcher@visalia.tv", + "phone": "+1 (815) 430-2641", + "address": "347 Tehama Street, Hollins, Washington, 5953", + "about": "Est consequat id ad Lorem consequat quis ullamco minim pariatur ipsum cillum. Enim exercitation qui duis cillum ea amet ea sint proident officia dolor non. Irure culpa cillum minim officia est culpa sit.\r\n", + "registered": "Monday, May 5, 2014 1:51 AM", + "latitude": 51.516197, + "longitude": 80.400628, + "tags": [ + "ut", + "tempor", + "pariatur", + "ex", + "dolore", + "deserunt", + "culpa" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hansen Estrada" + }, + { + "id": 1, + "name": "Regina Munoz" + }, + { + "id": 2, + "name": "Bethany Cabrera" + } + ], + "greeting": "Hello, Deana! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82120c61930bc25e2ff", + "index": 53, + "guid": "99c62c99-ef16-4eb7-a41d-80feafca740a", + "isActive": false, + "balance": "$1,836.96", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "brown", + "name": { + "first": "Long", + "last": "Sandoval" + }, + "company": "GINKLE", + "email": "long.sandoval@ginkle.name", + "phone": "+1 (989) 541-2327", + "address": "161 Crown Street, Omar, Kentucky, 8615", + "about": "Et ea eiusmod ex consequat culpa proident. Reprehenderit proident ullamco ullamco aliquip incididunt ullamco sit proident dolore nulla fugiat sit laboris. Adipisicing ullamco laborum nulla exercitation reprehenderit irure ex.\r\n", + "registered": "Tuesday, July 29, 2014 12:14 AM", + "latitude": -20.766582, + "longitude": 74.616145, + "tags": [ + "et", + "consequat", + "duis", + "excepteur", + "sint", + "sunt", + "aliqua" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Aline Rosa" + }, + { + "id": 1, + "name": "Olivia Quinn" + }, + { + "id": 2, + "name": "Fowler Carter" + } + ], + "greeting": "Hello, Long! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821cc7ceb7da88472ed", + "index": 54, + "guid": "3a965f51-48f9-4d10-8a9b-a0b529749c93", + "isActive": true, + "balance": "$2,950.94", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "green", + "name": { + "first": "Rush", + "last": "Thornton" + }, + "company": "AQUASSEUR", + "email": "rush.thornton@aquasseur.me", + "phone": "+1 (916) 493-2777", + "address": "344 Sunnyside Avenue, Brethren, California, 9529", + "about": "Duis ut consequat eu laborum voluptate eu Lorem cillum ad in commodo adipisicing. Excepteur aliquip sint dolor voluptate cillum nisi mollit mollit laborum ex culpa adipisicing voluptate. Cupidatat do sit fugiat amet irure. 
Cupidatat ex ut commodo reprehenderit veniam sit est officia ad pariatur aliquip.\r\n", + "registered": "Thursday, September 25, 2014 11:09 AM", + "latitude": -61.409359, + "longitude": -87.414208, + "tags": [ + "adipisicing", + "consequat", + "magna", + "Lorem", + "consequat", + "voluptate", + "eu" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hill Good" + }, + { + "id": 1, + "name": "Hartman Rice" + }, + { + "id": 2, + "name": "Petersen Hogan" + } + ], + "greeting": "Hello, Rush! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821e221eb5b7ed845c0", + "index": 55, + "guid": "ab0f1517-8c25-46ff-a281-0dcc932e2f9a", + "isActive": false, + "balance": "$2,070.82", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Alyce", + "last": "Perez" + }, + "company": "CAPSCREEN", + "email": "alyce.perez@capscreen.us", + "phone": "+1 (955) 432-2025", + "address": "896 Troutman Street, Williamson, West Virginia, 6491", + "about": "In consequat quis ex sint nisi proident esse excepteur quis nostrud. Enim incididunt ullamco sint quis eiusmod qui tempor ad laboris eiusmod nulla in aliquip sit. Nostrud fugiat fugiat Lorem laboris pariatur eiusmod amet ea do irure et. Excepteur pariatur consequat exercitation amet occaecat do aliqua non deserunt nulla cupidatat tempor id. Mollit ex incididunt et nulla culpa mollit veniam qui amet in excepteur pariatur. Commodo reprehenderit tempor laborum nisi anim minim deserunt eiusmod adipisicing deserunt ut eiusmod excepteur.\r\n", + "registered": "Monday, June 23, 2014 7:37 AM", + "latitude": -4.180393, + "longitude": 21.4789, + "tags": [ + "dolore", + "officia", + "laborum", + "aliquip", + "ex", + "eu", + "sint" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Rosalind Lang" + }, + { + "id": 1, + "name": "Ina Pratt" + }, + { + "id": 2, + "name": "Tamika Mercer" + } + ], + "greeting": "Hello, Alyce! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8215e315c720e186460", + "index": 56, + "guid": "10727900-9be7-46d5-8f2c-7c6834d95a25", + "isActive": false, + "balance": "$1,907.19", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "brown", + "name": { + "first": "Snider", + "last": "Johnson" + }, + "company": "IPLAX", + "email": "snider.johnson@iplax.ca", + "phone": "+1 (883) 539-3127", + "address": "424 Hancock Street, Springdale, Wyoming, 7054", + "about": "Incididunt proident amet consectetur cupidatat ex officia labore cupidatat laborum. Tempor proident officia nisi Lorem. Lorem commodo commodo ea voluptate excepteur consequat anim quis excepteur sunt officia.\r\n", + "registered": "Saturday, March 8, 2014 9:23 PM", + "latitude": 16.66716, + "longitude": 17.844641, + "tags": [ + "esse", + "est", + "eu", + "dolor", + "ea", + "voluptate", + "nostrud" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bauer Burt" + }, + { + "id": 1, + "name": "Lowe Boyd" + }, + { + "id": 2, + "name": "Moon Garcia" + } + ], + "greeting": "Hello, Snider! 
You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82130f82b9ee9265e5f", + "index": 57, + "guid": "ba427ee5-5013-498f-a1a2-97a71b249a6e", + "isActive": true, + "balance": "$2,070.53", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Berry", + "last": "Carver" + }, + "company": "EVENTIX", + "email": "berry.carver@eventix.org", + "phone": "+1 (907) 508-2463", + "address": "415 Havens Place, Nile, Connecticut, 6089", + "about": "Quis dolor aute consequat sunt esse dolore Lorem pariatur reprehenderit incididunt aliqua. Officia sunt aute fugiat consectetur id exercitation aliquip velit do fugiat culpa. Et ad amet exercitation veniam ipsum duis qui sunt incididunt. Eiusmod commodo esse aliquip exercitation pariatur consequat nulla nulla quis eiusmod dolor. Ut consectetur qui culpa id veniam dolore pariatur quis est cillum voluptate esse. Sunt eiusmod adipisicing mollit est tempor ipsum dolore tempor. Velit consequat dolore cillum adipisicing id nulla veniam nisi velit in magna id anim.\r\n", + "registered": "Monday, January 13, 2014 10:45 PM", + "latitude": -60.884888, + "longitude": 139.360489, + "tags": [ + "eu", + "deserunt", + "minim", + "quis", + "eiusmod", + "sint", + "dolor" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tanisha Dudley" + }, + { + "id": 1, + "name": "Dale Mcgowan" + }, + { + "id": 2, + "name": "Torres Pennington" + } + ], + "greeting": "Hello, Berry! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821bc354233d50ef914", + "index": 58, + "guid": "6ab626f8-6b71-4f17-ba39-4cc97fdf4855", + "isActive": false, + "balance": "$2,822.25", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Kramer", + "last": "Berg" + }, + "company": "INTRADISK", + "email": "kramer.berg@intradisk.biz", + "phone": "+1 (901) 534-3326", + "address": "455 Bath Avenue, Hoagland, Guam, 2206", + "about": "Labore ullamco aliquip id incididunt cupidatat pariatur. In magna et aliquip consectetur dolor ullamco aliqua reprehenderit. Ad velit nisi ex culpa consequat. Culpa eiusmod incididunt pariatur esse tempor officia mollit.\r\n", + "registered": "Friday, June 13, 2014 8:45 AM", + "latitude": -43.442578, + "longitude": 69.627031, + "tags": [ + "exercitation", + "dolor", + "quis", + "laboris", + "exercitation", + "sunt", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Love Hutchinson" + }, + { + "id": 1, + "name": "Hayden Marquez" + }, + { + "id": 2, + "name": "Macdonald Hahn" + } + ], + "greeting": "Hello, Kramer! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821cf5d51b48b4bd07a", + "index": 59, + "guid": "7b339d1e-d759-4fed-9e58-fd2608cdb0f2", + "isActive": false, + "balance": "$3,836.86", + "picture": "http://placehold.it/32x32", + "age": 20, + "eyeColor": "brown", + "name": { + "first": "Joann", + "last": "Elliott" + }, + "company": "ATOMICA", + "email": "joann.elliott@atomica.io", + "phone": "+1 (992) 429-2667", + "address": "788 Willow Place, Lindisfarne, Mississippi, 3656", + "about": "Ut consequat sunt ipsum minim velit. Lorem eiusmod dolor voluptate est deserunt cupidatat ut ipsum. Et et irure Lorem laborum sint mollit pariatur elit et enim eu eu sunt. 
Nisi do quis proident enim irure dolore ut Lorem fugiat quis voluptate non reprehenderit dolore.\r\n", + "registered": "Monday, May 12, 2014 6:04 PM", + "latitude": 14.309335, + "longitude": 32.596666, + "tags": [ + "nulla", + "magna", + "dolore", + "incididunt", + "fugiat", + "elit", + "veniam" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mason Hurst" + }, + { + "id": 1, + "name": "Castaneda Davidson" + }, + { + "id": 2, + "name": "Rasmussen Adkins" + } + ], + "greeting": "Hello, Joann! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821876778d3d011eb7b", + "index": 60, + "guid": "a016ab64-b6dd-42bc-8b5f-dbbab7a01d2a", + "isActive": true, + "balance": "$2,795.00", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Barbara", + "last": "Nolan" + }, + "company": "FURNAFIX", + "email": "barbara.nolan@furnafix.net", + "phone": "+1 (892) 600-2820", + "address": "540 Dennett Place, Mammoth, Rhode Island, 6151", + "about": "In velit officia quis Lorem. Ex quis cillum esse deserunt consectetur et nulla tempor. Lorem reprehenderit cillum excepteur ea veniam commodo et ad ullamco. Ex elit nisi non ipsum aliqua laborum sint aliqua. Reprehenderit consectetur dolore occaecat irure incididunt sunt.\r\n", + "registered": "Monday, April 21, 2014 11:59 AM", + "latitude": 71.103088, + "longitude": -78.48592, + "tags": [ + "eu", + "ullamco", + "cillum", + "est", + "commodo", + "nisi", + "tempor" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Cynthia Aguilar" + }, + { + "id": 1, + "name": "Deanna Graves" + }, + { + "id": 2, + "name": "Bertha Caldwell" + } + ], + "greeting": "Hello, Barbara! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821d6e2aaf18a0b184f", + "index": 61, + "guid": "89ca6bc0-19c2-4d13-a37c-90d02005a6bc", + "isActive": false, + "balance": "$3,134.62", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "blue", + "name": { + "first": "Penelope", + "last": "William" + }, + "company": "UTARA", + "email": "penelope.william@utara.com", + "phone": "+1 (968) 575-2395", + "address": "276 Ralph Avenue, Ezel, New Mexico, 2656", + "about": "Pariatur officia anim dolore commodo ipsum labore sint officia. Lorem culpa ea sunt non. Voluptate irure voluptate ut cupidatat nulla nostrud.\r\n", + "registered": "Monday, July 7, 2014 11:29 PM", + "latitude": -83.184502, + "longitude": -91.222471, + "tags": [ + "anim", + "incididunt", + "aliqua", + "id", + "reprehenderit", + "laboris", + "consequat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Maxwell Rocha" + }, + { + "id": 1, + "name": "Roach Bryan" + }, + { + "id": 2, + "name": "Woods Daugherty" + } + ], + "greeting": "Hello, Penelope! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8217abc85acd32fb42a", + "index": 62, + "guid": "cc58e868-7934-40a2-a350-13ad47e86a56", + "isActive": true, + "balance": "$3,734.05", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Kris", + "last": "Cotton" + }, + "company": "CORPORANA", + "email": "kris.cotton@corporana.biz", + "phone": "+1 (873) 412-2513", + "address": "650 Bushwick Court, Malott, Wisconsin, 9739", + "about": "Aliquip cupidatat exercitation exercitation consectetur. 
Sit id excepteur ea ut laborum irure ullamco laborum irure reprehenderit nisi aute eu. Ipsum do anim ea veniam do amet pariatur. Lorem consectetur labore deserunt anim deserunt aute.\r\n", + "registered": "Thursday, February 20, 2014 7:18 AM", + "latitude": 41.787092, + "longitude": -44.032192, + "tags": [ + "ex", + "incididunt", + "ut", + "cupidatat", + "commodo", + "commodo", + "occaecat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Angela Middleton" + }, + { + "id": 1, + "name": "Hicks Douglas" + }, + { + "id": 2, + "name": "Shaffer West" + } + ], + "greeting": "Hello, Kris! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821c20dda84da819450", + "index": 63, + "guid": "6ecab19a-0268-4d43-ad43-af2e4941b2e7", + "isActive": false, + "balance": "$2,748.00", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "William", + "last": "Haney" + }, + "company": "PROSURE", + "email": "william.haney@prosure.co.uk", + "phone": "+1 (890) 508-3193", + "address": "930 Hopkins Street, Bluetown, American Samoa, 9860", + "about": "Ad aute aliquip eiusmod tempor ullamco. Et ipsum consequat consequat magna do fugiat sint proident nostrud ad fugiat commodo dolor. Est anim do laboris id esse minim do voluptate occaecat nulla esse. Veniam sit dolore aliqua pariatur quis commodo enim nisi sint excepteur pariatur.\r\n", + "registered": "Friday, January 10, 2014 2:42 PM", + "latitude": 70.057151, + "longitude": -46.509685, + "tags": [ + "pariatur", + "est", + "adipisicing", + "aute", + "in", + "ex", + "eu" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Aida Lindsey" + }, + { + "id": 1, + "name": "Harper Roberson" + }, + { + "id": 2, + "name": "Flora Woods" + } + ], + "greeting": "Hello, William! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8219387ab6d8ebd8c1d", + "index": 64, + "guid": "732dfcb2-f8ab-44bf-a11b-d98f5b589993", + "isActive": true, + "balance": "$1,010.25", + "picture": "http://placehold.it/32x32", + "age": 26, + "eyeColor": "green", + "name": { + "first": "Barrera", + "last": "Sellers" + }, + "company": "KLUGGER", + "email": "barrera.sellers@klugger.tv", + "phone": "+1 (968) 510-3228", + "address": "954 Verona Place, Homeworth, Louisiana, 6862", + "about": "Lorem ex voluptate cupidatat minim officia voluptate enim proident qui mollit dolore ipsum. Non consectetur adipisicing quis consectetur. Non est Lorem ad qui nostrud aute aliqua labore exercitation ea aliquip irure dolor nisi. Excepteur anim ex exercitation velit adipisicing qui excepteur enim culpa consequat sint. Velit consectetur velit culpa eu sint irure culpa consequat anim incididunt ad amet excepteur. Non est anim id sint ipsum id officia dolor commodo dolore labore consectetur.\r\n", + "registered": "Sunday, September 14, 2014 1:44 AM", + "latitude": -88.561319, + "longitude": -44.881241, + "tags": [ + "aliquip", + "eiusmod", + "nisi", + "aliquip", + "minim", + "ullamco", + "commodo" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bond Goff" + }, + { + "id": 1, + "name": "Cathleen Hatfield" + }, + { + "id": 2, + "name": "Pansy Burke" + } + ], + "greeting": "Hello, Barrera! 
You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821ab4d9564c853db57", + "index": 65, + "guid": "bba298e6-ebca-4334-afe9-91807ed1b672", + "isActive": true, + "balance": "$2,345.80", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Mullen", + "last": "Stephenson" + }, + "company": "POLARIUM", + "email": "mullen.stephenson@polarium.name", + "phone": "+1 (935) 461-2692", + "address": "228 Court Square, Beaulieu, Kansas, 9445", + "about": "Ex magna proident do dolore nostrud aliqua aute dolore enim mollit consectetur sunt pariatur. Ex id duis enim duis laborum do tempor proident exercitation duis. Aute dolor cillum anim incididunt voluptate. Qui proident consectetur sit laboris ex enim excepteur qui.\r\n", + "registered": "Thursday, May 1, 2014 7:57 PM", + "latitude": 34.294733, + "longitude": 138.270754, + "tags": [ + "minim", + "veniam", + "do", + "consequat", + "esse", + "sit", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bobbi Pate" + }, + { + "id": 1, + "name": "Moran Griffith" + }, + { + "id": 2, + "name": "Marian Hopkins" + } + ], + "greeting": "Hello, Mullen! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8218eb53a92791a3185", + "index": 66, + "guid": "d297bed2-0986-4d22-b120-0e04c253fb34", + "isActive": false, + "balance": "$2,586.70", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "brown", + "name": { + "first": "Leigh", + "last": "Kidd" + }, + "company": "SUNCLIPSE", + "email": "leigh.kidd@sunclipse.me", + "phone": "+1 (870) 427-3520", + "address": "156 Keen Court, Chamizal, Indiana, 4250", + "about": "Ut sunt elit irure eiusmod aliquip consectetur in. Dolore id exercitation irure consectetur. Pariatur occaecat cillum nulla cillum esse deserunt minim consectetur aliqua duis eiusmod. Ea proident aliquip cillum ullamco duis elit Lorem dolore aliqua. Do fugiat culpa reprehenderit ea eu non enim.\r\n", + "registered": "Wednesday, January 15, 2014 1:57 AM", + "latitude": 61.842523, + "longitude": -56.422447, + "tags": [ + "non", + "non", + "incididunt", + "elit", + "aliqua", + "proident", + "nostrud" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Jean Burns" + }, + { + "id": 1, + "name": "Patel Wilkinson" + }, + { + "id": 2, + "name": "Lillie Lane" + } + ], + "greeting": "Hello, Leigh! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8211c12bb8d837e265b", + "index": 67, + "guid": "6560e2f0-6bd2-4e19-bcd1-35297f162890", + "isActive": false, + "balance": "$2,040.59", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "green", + "name": { + "first": "Perry", + "last": "Leonard" + }, + "company": "CANDECOR", + "email": "perry.leonard@candecor.us", + "phone": "+1 (945) 556-2907", + "address": "359 Barbey Street, Grandview, Alaska, 9082", + "about": "Dolor fugiat consequat reprehenderit duis duis ex consectetur ea non ut. Irure et elit et mollit consequat dolor exercitation exercitation deserunt culpa mollit nulla. 
Est sunt deserunt incididunt exercitation eu aliqua qui elit labore id in eu ex.\r\n", + "registered": "Tuesday, August 26, 2014 11:34 PM", + "latitude": -67.974417, + "longitude": 97.189082, + "tags": [ + "sit", + "ex", + "ullamco", + "exercitation", + "adipisicing", + "non", + "laboris" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Alissa Ramsey" + }, + { + "id": 1, + "name": "Adela Bell" + }, + { + "id": 2, + "name": "Pearl Henderson" + } + ], + "greeting": "Hello, Perry! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8219a0b8e44380bd954", + "index": 68, + "guid": "4aa2bec3-3eaa-464f-9577-27f6c65e64b7", + "isActive": false, + "balance": "$3,911.59", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Barbra", + "last": "Mejia" + }, + "company": "ANIXANG", + "email": "barbra.mejia@anixang.ca", + "phone": "+1 (987) 517-2550", + "address": "635 Franklin Street, Allensworth, Florida, 1895", + "about": "Aliqua tempor excepteur velit do exercitation laborum commodo laboris aliqua nostrud. Aute aliquip nisi nulla labore id veniam ad voluptate non eiusmod minim mollit. Minim incididunt nostrud sint ex.\r\n", + "registered": "Thursday, January 16, 2014 11:04 PM", + "latitude": 44.320545, + "longitude": -61.392889, + "tags": [ + "cupidatat", + "excepteur", + "eu", + "consectetur", + "fugiat", + "aliquip", + "deserunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Potts Church" + }, + { + "id": 1, + "name": "Dianna Valentine" + }, + { + "id": 2, + "name": "Valeria Whitney" + } + ], + "greeting": "Hello, Barbra! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821b618d050a076972c", + "index": 69, + "guid": "ef47a627-c517-4f5a-931b-d67c8a18614b", + "isActive": false, + "balance": "$1,771.53", + "picture": "http://placehold.it/32x32", + "age": 32, + "eyeColor": "brown", + "name": { + "first": "Gonzales", + "last": "Walker" + }, + "company": "EVIDENDS", + "email": "gonzales.walker@evidends.org", + "phone": "+1 (984) 510-3347", + "address": "336 Pershing Loop, Croom, Georgia, 9128", + "about": "Quis reprehenderit consectetur ad aliqua ad amet incididunt aute irure Lorem veniam. Consequat consequat ut reprehenderit officia cupidatat irure aliqua nostrud veniam velit aliquip magna elit. Ullamco amet nostrud est cupidatat adipisicing fugiat magna anim eu occaecat incididunt. Ut ex ut quis veniam nulla ad ea magna elit incididunt Lorem anim ipsum elit. Aliqua laborum officia magna aliqua. Est quis adipisicing cillum cupidatat sunt velit fugiat mollit exercitation cupidatat sit.\r\n", + "registered": "Saturday, June 21, 2014 9:02 PM", + "latitude": -64.407501, + "longitude": -157.742045, + "tags": [ + "pariatur", + "minim", + "consectetur", + "consequat", + "ad", + "aliqua", + "ut" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tina Patterson" + }, + { + "id": 1, + "name": "Gabriela Nielsen" + }, + { + "id": 2, + "name": "Amalia Mueller" + } + ], + "greeting": "Hello, Gonzales! 
You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8210426ced41d67a58b", + "index": 70, + "guid": "5d045ae8-c32c-43f6-b404-30a943205f5e", + "isActive": true, + "balance": "$2,054.02", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "green", + "name": { + "first": "Clarissa", + "last": "Madden" + }, + "company": "NIQUENT", + "email": "clarissa.madden@niquent.biz", + "phone": "+1 (910) 480-3769", + "address": "637 Scholes Street, Needmore, Alabama, 9344", + "about": "Sunt nulla ad aliquip incididunt ullamco culpa laboris. Consectetur non enim in officia incididunt deserunt. Quis est consequat ipsum ad. Pariatur nostrud voluptate magna occaecat minim irure sint nostrud voluptate ea labore ullamco quis. Mollit veniam consequat commodo sunt.\r\n", + "registered": "Wednesday, January 8, 2014 4:02 AM", + "latitude": 46.115788, + "longitude": 79.731859, + "tags": [ + "reprehenderit", + "nisi", + "id", + "consectetur", + "sunt", + "nostrud", + "laboris" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mandy Buckner" + }, + { + "id": 1, + "name": "Hickman Brown" + }, + { + "id": 2, + "name": "Kemp Mclaughlin" + } + ], + "greeting": "Hello, Clarissa! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8211049b550404f3ce5", + "index": 71, + "guid": "2f9a6201-6728-4509-8d88-c0a614649311", + "isActive": true, + "balance": "$1,324.10", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Joyce", + "last": "Callahan" + }, + "company": "OPTYK", + "email": "joyce.callahan@optyk.io", + "phone": "+1 (893) 544-2327", + "address": "567 Crystal Street, Freetown, District Of Columbia, 8319", + "about": "Deserunt in nisi id consequat qui. Sunt velit proident id culpa incididunt velit aute dolore labore. Deserunt qui ea adipisicing cillum irure sit sunt excepteur quis et quis nulla dolore pariatur. Consequat ut et veniam dolor velit nulla veniam fugiat commodo velit fugiat ad veniam ad. Anim consequat labore deserunt eiusmod esse. Laborum labore eu et incididunt commodo dolore eiusmod occaecat. Nisi elit duis mollit cillum id enim.\r\n", + "registered": "Tuesday, February 4, 2014 2:38 AM", + "latitude": -76.437449, + "longitude": -169.66079, + "tags": [ + "ullamco", + "non", + "officia", + "eiusmod", + "duis", + "cupidatat", + "mollit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hendricks Logan" + }, + { + "id": 1, + "name": "Dolly Baird" + }, + { + "id": 2, + "name": "Wendi Wallace" + } + ], + "greeting": "Hello, Joyce! You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82176dab97e42086b90", + "index": 72, + "guid": "9680920d-9303-471b-849e-c30e38e06d45", + "isActive": false, + "balance": "$2,696.40", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Felecia", + "last": "Gonzalez" + }, + "company": "CORIANDER", + "email": "felecia.gonzalez@coriander.net", + "phone": "+1 (923) 575-3582", + "address": "315 Borinquen Pl, Rosburg, Pennsylvania, 9619", + "about": "Laboris officia exercitation duis aliqua in sint consectetur. Ad ut labore ipsum ipsum ut culpa labore irure aute. Et exercitation tempor do dolor culpa ad ipsum pariatur adipisicing fugiat. 
Incididunt amet incididunt minim quis cupidatat pariatur enim fugiat reprehenderit est ipsum labore.\r\n", + "registered": "Monday, January 20, 2014 11:59 PM", + "latitude": -36.201421, + "longitude": 162.994705, + "tags": [ + "cillum", + "reprehenderit", + "non", + "est", + "tempor", + "exercitation", + "fugiat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Gould Waters" + }, + { + "id": 1, + "name": "Ramona Coffey" + }, + { + "id": 2, + "name": "Taylor Byers" + } + ], + "greeting": "Hello, Felecia! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821df55849ccd4ca74c", + "index": 73, + "guid": "01b7c49a-6dac-4b65-906b-4483de07a5e8", + "isActive": true, + "balance": "$2,037.32", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "blue", + "name": { + "first": "Stanton", + "last": "Rutledge" + }, + "company": "EXOBLUE", + "email": "stanton.rutledge@exoblue.com", + "phone": "+1 (817) 408-2566", + "address": "741 Crooke Avenue, Newry, Nevada, 8555", + "about": "Commodo adipisicing ea ipsum non irure quis excepteur. Qui laborum qui sit cupidatat dolore consectetur tempor esse occaecat cillum qui. Dolore in amet ea ex proident do nulla.\r\n", + "registered": "Thursday, August 28, 2014 8:35 PM", + "latitude": 87.299409, + "longitude": 22.167535, + "tags": [ + "commodo", + "aliqua", + "irure", + "ea", + "dolor", + "aute", + "non" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Best Klein" + }, + { + "id": 1, + "name": "Dickerson Mcknight" + }, + { + "id": 2, + "name": "Gayle Washington" + } + ], + "greeting": "Hello, Stanton! You have 6 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821f3516c63c6fa05e8", + "index": 74, + "guid": "c299ecf4-0528-4280-b6b4-909801b1e9dd", + "isActive": true, + "balance": "$1,695.83", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "green", + "name": { + "first": "Thelma", + "last": "Barnett" + }, + "company": "DUOFLEX", + "email": "thelma.barnett@duoflex.biz", + "phone": "+1 (947) 416-2234", + "address": "325 Tampa Court, Zortman, Illinois, 3609", + "about": "Exercitation esse culpa enim anim. Cillum voluptate quis tempor excepteur elit aliquip consequat officia cupidatat laborum ad cupidatat. Mollit exercitation esse fugiat do do id irure tempor et duis. Ut sit reprehenderit velit sit eiusmod in officia nisi commodo magna id in. Lorem sint velit adipisicing aute.\r\n", + "registered": "Tuesday, April 22, 2014 5:11 AM", + "latitude": 20.61329, + "longitude": 48.592063, + "tags": [ + "et", + "excepteur", + "nostrud", + "ullamco", + "quis", + "occaecat", + "in" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Amy Parks" + }, + { + "id": 1, + "name": "Greene Nunez" + }, + { + "id": 2, + "name": "Janna Roth" + } + ], + "greeting": "Hello, Thelma! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821637950981c0fa4c8", + "index": 75, + "guid": "bc58930c-58d8-4223-8a98-20295ac61c4e", + "isActive": true, + "balance": "$1,836.33", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "green", + "name": { + "first": "Nunez", + "last": "Freeman" + }, + "company": "ZILCH", + "email": "nunez.freeman@zilch.co.uk", + "phone": "+1 (959) 439-2497", + "address": "729 Varick Street, Marne, Tennessee, 1687", + "about": "Sunt nulla ipsum non in nulla. 
Dolore in fugiat in laborum veniam enim dolore cupidatat. Elit tempor ullamco id in minim excepteur et aute ut mollit aliquip qui consequat. Duis esse magna culpa aliqua ad ipsum deserunt laborum amet sint. Quis voluptate quis quis aute.\r\n", + "registered": "Monday, August 11, 2014 5:27 PM", + "latitude": 85.57657, + "longitude": -145.772132, + "tags": [ + "ullamco", + "tempor", + "et", + "non", + "magna", + "non", + "ex" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Latasha Randolph" + }, + { + "id": 1, + "name": "Neva Porter" + }, + { + "id": 2, + "name": "Drake Nicholson" + } + ], + "greeting": "Hello, Nunez! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821ed9d29afc180b718", + "index": 76, + "guid": "45fb73c9-30e9-4e61-b78d-41c8f79a282e", + "isActive": false, + "balance": "$1,298.91", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "green", + "name": { + "first": "Bentley", + "last": "Reyes" + }, + "company": "ZYPLE", + "email": "bentley.reyes@zyple.tv", + "phone": "+1 (919) 510-3585", + "address": "227 Oakland Place, Farmers, Iowa, 9827", + "about": "Cillum proident eiusmod id amet anim laboris elit sint ea et non. Aliqua et reprehenderit amet est ea fugiat aute. Minim aute aliquip nulla elit. Duis ad exercitation excepteur laborum anim occaecat nulla sunt. Quis pariatur nulla Lorem consectetur proident sunt amet est et elit eu sunt. Ut irure voluptate consequat amet sint deserunt quis. Incididunt ea culpa commodo fugiat qui veniam quis Lorem incididunt dolor.\r\n", + "registered": "Wednesday, April 9, 2014 1:19 AM", + "latitude": -82.587336, + "longitude": -74.931056, + "tags": [ + "dolore", + "nisi", + "exercitation", + "ullamco", + "excepteur", + "qui", + "ut" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nadia Pearson" + }, + { + "id": 1, + "name": "Reba Frost" + }, + { + "id": 2, + "name": "Lilia Mcbride" + } + ], + "greeting": "Hello, Bentley! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821a936a115315db0b9", + "index": 77, + "guid": "a1b82517-f6a6-437a-9f2d-2c0043591cfa", + "isActive": false, + "balance": "$1,016.00", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Morales", + "last": "Shields" + }, + "company": "CORECOM", + "email": "morales.shields@corecom.name", + "phone": "+1 (814) 407-2079", + "address": "472 Coleridge Street, Shasta, Massachusetts, 501", + "about": "Ipsum elit adipisicing nostrud quis ut nisi ullamco consectetur ex laborum reprehenderit anim magna fugiat. In nulla dolor esse exercitation elit exercitation. Laborum sit esse velit magna ea irure est ut velit id nisi sint qui. Laborum voluptate amet cupidatat laborum aute in id duis irure. Enim aliqua enim magna ut consequat. Tempor est commodo elit eu et ut occaecat culpa ex ex. Ullamco cillum sint ipsum tempor anim ullamco sit pariatur excepteur dolor mollit ad quis.\r\n", + "registered": "Saturday, July 5, 2014 5:10 AM", + "latitude": -20.583102, + "longitude": -23.34973, + "tags": [ + "nulla", + "proident", + "enim", + "dolor", + "elit", + "excepteur", + "dolore" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lacey Perry" + }, + { + "id": 1, + "name": "Stokes Foley" + }, + { + "id": 2, + "name": "Trisha Morales" + } + ], + "greeting": "Hello, Morales! 
You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8214ceb22176ac554f9", + "index": 78, + "guid": "1aa9258a-f508-4e73-adb9-5d2cce0dff79", + "isActive": true, + "balance": "$1,100.42", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "blue", + "name": { + "first": "Chandra", + "last": "Patel" + }, + "company": "PROVIDCO", + "email": "chandra.patel@providco.me", + "phone": "+1 (875) 430-3869", + "address": "543 Bainbridge Street, Kidder, Arkansas, 3624", + "about": "Magna ad tempor commodo esse labore mollit sit. Duis laborum excepteur dolor officia exercitation. Elit sunt ex voluptate anim consectetur in ullamco mollit qui non. Cupidatat ipsum et cupidatat cupidatat consequat nulla Lorem culpa minim dolore cupidatat ipsum sint.\r\n", + "registered": "Sunday, June 29, 2014 1:41 AM", + "latitude": 55.150075, + "longitude": -26.185265, + "tags": [ + "amet", + "consectetur", + "occaecat", + "cillum", + "enim", + "ut", + "occaecat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Geneva Snider" + }, + { + "id": 1, + "name": "Rose Michael" + }, + { + "id": 2, + "name": "Kirkland Mason" + } + ], + "greeting": "Hello, Chandra! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821050884ba49ad868d", + "index": 79, + "guid": "88a0d238-e3c9-4b1a-8d19-08c622f9eaae", + "isActive": false, + "balance": "$1,263.98", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Elsa", + "last": "Chambers" + }, + "company": "VICON", + "email": "elsa.chambers@vicon.us", + "phone": "+1 (940) 436-3956", + "address": "190 Albemarle Terrace, Sheatown, Hawaii, 2654", + "about": "Est do esse elit consectetur elit. Aliqua esse duis est sint non. Enim minim laborum ad duis. Proident laboris quis ea amet nulla occaecat ex laboris duis ut velit.\r\n", + "registered": "Saturday, April 19, 2014 3:28 AM", + "latitude": 62.679122, + "longitude": 95.229313, + "tags": [ + "magna", + "ipsum", + "Lorem", + "ut", + "duis", + "elit", + "sit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Laurie Fuentes" + }, + { + "id": 1, + "name": "Juana Blevins" + }, + { + "id": 2, + "name": "Virginia Hester" + } + ], + "greeting": "Hello, Elsa! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821eb0dd28cfdc525a9", + "index": 80, + "guid": "e1574006-8d49-4399-8b59-92eaa0ed2be1", + "isActive": true, + "balance": "$2,395.40", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "brown", + "name": { + "first": "Morris", + "last": "Trujillo" + }, + "company": "ZOGAK", + "email": "morris.trujillo@zogak.ca", + "phone": "+1 (919) 553-3453", + "address": "127 Clove Road, Keyport, Puerto Rico, 5969", + "about": "Minim do veniam non elit excepteur enim sunt excepteur non amet. Duis eu consequat adipisicing nostrud ad cupidatat tempor occaecat. Ea id consectetur non anim. Irure dolor qui excepteur anim excepteur adipisicing ea sint id aliquip consequat eu id. Ea exercitation officia nostrud ipsum. Voluptate sunt irure aliqua tempor pariatur irure labore ea amet. 
Quis reprehenderit aliqua pariatur esse id anim laborum ullamco amet.\r\n", + "registered": "Sunday, February 2, 2014 12:52 PM", + "latitude": -86.963921, + "longitude": -157.636932, + "tags": [ + "occaecat", + "amet", + "ex", + "ea", + "exercitation", + "aliqua", + "elit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lester Watkins" + }, + { + "id": 1, + "name": "Kellie Clayton" + }, + { + "id": 2, + "name": "Valencia Edwards" + } + ], + "greeting": "Hello, Morris! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821f5f0ae13a893ce36", + "index": 81, + "guid": "9dd75e1e-8f3c-473e-800e-518254719ca1", + "isActive": true, + "balance": "$1,956.52", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "brown", + "name": { + "first": "Gwen", + "last": "Park" + }, + "company": "INTERGEEK", + "email": "gwen.park@intergeek.org", + "phone": "+1 (830) 581-3561", + "address": "514 Beayer Place, Maxville, New Hampshire, 4162", + "about": "Consequat labore commodo nulla veniam aliqua. Tempor ipsum officia exercitation amet elit dolor labore eu voluptate cillum reprehenderit exercitation proident. Id cillum laborum cupidatat reprehenderit anim cillum exercitation culpa aliqua deserunt cupidatat. In proident ea eu nisi est enim. Quis dolor sit aliquip reprehenderit in id ipsum proident duis. Sit eu sint nisi velit. Minim elit nostrud aliquip anim.\r\n", + "registered": "Wednesday, September 17, 2014 9:38 AM", + "latitude": -66.199245, + "longitude": -52.824656, + "tags": [ + "ex", + "magna", + "incididunt", + "veniam", + "mollit", + "eu", + "sunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nieves Potter" + }, + { + "id": 1, + "name": "Welch Reeves" + }, + { + "id": 2, + "name": "Hardy Forbes" + } + ], + "greeting": "Hello, Gwen! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a20d620a7ec793ac", + "index": 82, + "guid": "1ae142ff-8d6a-4e72-a3f1-3ac4349ad0b7", + "isActive": false, + "balance": "$2,560.11", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Erickson", + "last": "Lancaster" + }, + "company": "SCENTRIC", + "email": "erickson.lancaster@scentric.biz", + "phone": "+1 (980) 465-3465", + "address": "412 Jackson Street, Chumuckla, South Carolina, 3216", + "about": "Id laborum consectetur pariatur non nulla incididunt labore magna minim duis. Ipsum laboris deserunt velit sunt voluptate. Laboris ipsum duis aliquip non aliqua. Commodo veniam mollit voluptate elit nisi nostrud laboris dolor tempor pariatur duis laborum.\r\n", + "registered": "Wednesday, May 7, 2014 8:50 PM", + "latitude": -57.590749, + "longitude": 117.31662, + "tags": [ + "ad", + "elit", + "non", + "fugiat", + "laborum", + "incididunt", + "enim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Reeves Nguyen" + }, + { + "id": 1, + "name": "Jo Christian" + }, + { + "id": 2, + "name": "Myers Lowe" + } + ], + "greeting": "Hello, Erickson! 
You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8213747fd3443999762", + "index": 83, + "guid": "ec870637-4e65-49c4-abfb-7b99c2d2f094", + "isActive": true, + "balance": "$2,079.21", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "blue", + "name": { + "first": "Saundra", + "last": "Kemp" + }, + "company": "LUNCHPOD", + "email": "saundra.kemp@lunchpod.io", + "phone": "+1 (950) 433-2550", + "address": "593 Stuart Street, Edenburg, Texas, 6251", + "about": "Ipsum proident et duis reprehenderit in minim in sint dolore enim aute excepteur cillum eiusmod. Do nulla deserunt quis adipisicing sunt reprehenderit. Dolor pariatur tempor sint ullamco.\r\n", + "registered": "Tuesday, June 24, 2014 6:44 AM", + "latitude": 81.565149, + "longitude": -92.061448, + "tags": [ + "magna", + "commodo", + "esse", + "ad", + "labore", + "amet", + "mollit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tommie Mays" + }, + { + "id": 1, + "name": "Lou Hubbard" + }, + { + "id": 2, + "name": "Jenna Gentry" + } + ], + "greeting": "Hello, Saundra! You have 9 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821524a5fd2a5f037a2", + "index": 84, + "guid": "e7894fe9-efd4-44ae-afd5-49bfde09b000", + "isActive": true, + "balance": "$2,320.08", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "green", + "name": { + "first": "Christensen", + "last": "Wolf" + }, + "company": "GLUKGLUK", + "email": "christensen.wolf@glukgluk.net", + "phone": "+1 (937) 519-3107", + "address": "460 Porter Avenue, Irwin, South Dakota, 2498", + "about": "Ullamco reprehenderit non aliquip amet do deserunt nostrud ea deserunt fugiat. Commodo pariatur est officia dolor dolore in excepteur pariatur laborum ut. Occaecat cillum ullamco nulla eu esse non nisi pariatur ipsum amet do ea ad culpa. Excepteur esse elit laborum deserunt ad consequat. Culpa esse esse nostrud commodo laborum officia sint ea mollit. Dolor culpa pariatur fugiat cillum quis proident non enim esse pariatur duis adipisicing amet. Consequat minim aliqua enim excepteur.\r\n", + "registered": "Saturday, February 8, 2014 10:20 PM", + "latitude": 22.478522, + "longitude": 30.070646, + "tags": [ + "quis", + "ad", + "adipisicing", + "exercitation", + "non", + "eu", + "anim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bright Moon" + }, + { + "id": 1, + "name": "Stephenson Sears" + }, + { + "id": 2, + "name": "Fletcher Swanson" + } + ], + "greeting": "Hello, Christensen! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82160fa0826e90b747b", + "index": 85, + "guid": "af08cc83-e8c9-4bcd-84b3-2f3251cf9a02", + "isActive": true, + "balance": "$2,575.60", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "brown", + "name": { + "first": "Weaver", + "last": "Parker" + }, + "company": "RETROTEX", + "email": "weaver.parker@retrotex.com", + "phone": "+1 (951) 411-2545", + "address": "560 Madison Place, Sidman, Ohio, 5153", + "about": "Reprehenderit esse dolor tempor consectetur occaecat exercitation consectetur irure commodo. Et eiusmod aute ipsum eu commodo qui id anim proident. Excepteur labore laboris aliqua incididunt ut nisi consequat deserunt dolore officia velit sint consectetur. Laboris sint minim mollit duis. Excepteur nostrud incididunt aute consequat ad magna eu quis. 
Id dolor eu aliqua deserunt cillum sint ea et nostrud. Lorem amet cillum nulla tempor commodo mollit aliquip do est enim enim.\r\n", + "registered": "Thursday, February 6, 2014 12:55 PM", + "latitude": 87.778566, + "longitude": -122.687026, + "tags": [ + "eu", + "voluptate", + "commodo", + "magna", + "ullamco", + "nulla", + "ea" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Ratliff Mckinney" + }, + { + "id": 1, + "name": "Walker Frye" + }, + { + "id": 2, + "name": "Mcgowan Daniel" + } + ], + "greeting": "Hello, Weaver! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8217a543d20bd4b10c1", + "index": 86, + "guid": "5b670ab7-4ee6-4b8e-b379-4e1849b6e329", + "isActive": false, + "balance": "$3,114.39", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Annabelle", + "last": "Sanders" + }, + "company": "PHORMULA", + "email": "annabelle.sanders@phormula.biz", + "phone": "+1 (982) 489-2678", + "address": "970 Llama Court, Moraida, Maine, 8694", + "about": "Nostrud adipisicing magna nulla magna voluptate duis eu voluptate cupidatat ut dolore excepteur esse dolor. Aliquip exercitation occaecat amet excepteur sit. Velit adipisicing esse labore veniam duis ullamco in ea. Adipisicing eiusmod cillum veniam nostrud sint laboris sit id officia. Esse esse anim sint do ea id. Esse ipsum mollit sit laborum nostrud mollit nulla id.\r\n", + "registered": "Tuesday, January 7, 2014 7:34 AM", + "latitude": 9.515348, + "longitude": -99.138606, + "tags": [ + "ipsum", + "sint", + "dolor", + "laborum", + "est", + "consequat", + "magna" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Juliet Clements" + }, + { + "id": 1, + "name": "Jeannine Pruitt" + }, + { + "id": 2, + "name": "Chambers Warren" + } + ], + "greeting": "Hello, Annabelle! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82179c1fe89e1ccec4d", + "index": 87, + "guid": "31ecaae4-6443-4c00-a20d-82100c49b488", + "isActive": true, + "balance": "$3,803.83", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Kelley", + "last": "Miles" + }, + "company": "AQUASURE", + "email": "kelley.miles@aquasure.co.uk", + "phone": "+1 (819) 529-2967", + "address": "680 Monument Walk, Wakulla, Vermont, 8903", + "about": "Ea sint dolor nostrud dolor id commodo esse nisi. Reprehenderit minim dolore nostrud sint incididunt excepteur reprehenderit enim velit velit. Proident officia velit Lorem dolore ullamco occaecat.\r\n", + "registered": "Saturday, May 3, 2014 12:23 AM", + "latitude": 73.767872, + "longitude": -118.631186, + "tags": [ + "consectetur", + "irure", + "nostrud", + "nostrud", + "aliquip", + "quis", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Maynard Townsend" + }, + { + "id": 1, + "name": "Carlene Molina" + }, + { + "id": 2, + "name": "Mai Bentley" + } + ], + "greeting": "Hello, Kelley! 
You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8218de20c1f1e1e93fa", + "index": 88, + "guid": "8f6b0aac-f2ba-45aa-a7c8-76c413bdeb7a", + "isActive": true, + "balance": "$1,898.86", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Mckay", + "last": "Velasquez" + }, + "company": "NORALEX", + "email": "mckay.velasquez@noralex.tv", + "phone": "+1 (973) 599-3463", + "address": "152 Roebling Street, Mathews, Delaware, 7958", + "about": "Nostrud esse dolor excepteur cillum aliqua. Ea nulla elit minim sint non culpa id. Et ullamco aute laborum incididunt sint quis. Tempor tempor aliqua in sunt. Minim elit dolor quis excepteur exercitation adipisicing. Pariatur incididunt tempor irure proident exercitation deserunt sint.\r\n", + "registered": "Monday, August 4, 2014 5:00 AM", + "latitude": 81.463276, + "longitude": -66.291508, + "tags": [ + "nisi", + "anim", + "qui", + "est", + "qui", + "ipsum", + "ullamco" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Myrna Rollins" + }, + { + "id": 1, + "name": "Tricia Gilliam" + }, + { + "id": 2, + "name": "Collins Obrien" + } + ], + "greeting": "Hello, Mckay! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821b3490524365bc954", + "index": 89, + "guid": "43825015-fed6-40c8-bd80-8aaf228067f9", + "isActive": false, + "balance": "$3,035.21", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Brandy", + "last": "Hayden" + }, + "company": "OMNIGOG", + "email": "brandy.hayden@omnigog.name", + "phone": "+1 (827) 481-2334", + "address": "537 Pioneer Street, Saticoy, Palau, 803", + "about": "Ea velit consectetur ipsum ut elit pariatur id labore sunt eu incididunt est aliqua. Veniam esse officia do non officia cupidatat proident id officia esse tempor non mollit dolore. Elit voluptate nulla exercitation laboris ex ad irure est do enim aute velit aute. Cillum adipisicing nisi dolor velit duis ad fugiat deserunt non commodo Lorem fugiat sint qui. Aute consectetur magna incididunt tempor in esse consectetur magna qui sit. Culpa consequat laborum duis adipisicing dolor in deserunt ut velit ea ex dolore ullamco esse.\r\n", + "registered": "Wednesday, February 12, 2014 7:32 PM", + "latitude": 4.319892, + "longitude": 81.048442, + "tags": [ + "labore", + "ullamco", + "commodo", + "quis", + "aute", + "nulla", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Riggs Flynn" + }, + { + "id": 1, + "name": "Kidd Guerrero" + }, + { + "id": 2, + "name": "Rosario Wade" + } + ], + "greeting": "Hello, Brandy! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8211b737d086264c9a9", + "index": 90, + "guid": "ec5fe190-d16f-4728-bce1-c5140852c583", + "isActive": true, + "balance": "$2,932.98", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "green", + "name": { + "first": "Sonia", + "last": "Orr" + }, + "company": "MUSANPOLY", + "email": "sonia.orr@musanpoly.me", + "phone": "+1 (822) 422-2010", + "address": "908 Dekalb Avenue, Elfrida, Arizona, 6925", + "about": "Ut cillum ex irure amet aliquip voluptate Lorem fugiat reprehenderit sunt reprehenderit quis. Nulla laborum sunt elit ad labore ut cupidatat cillum eiusmod est. Ea irure amet excepteur mollit eu ipsum id adipisicing occaecat. 
Cupidatat sunt do veniam esse enim sint qui voluptate sint. Qui officia ad cupidatat mollit laboris. Duis tempor fugiat ea mollit cupidatat exercitation sunt incididunt.\r\n", + "registered": "Thursday, September 25, 2014 8:21 PM", + "latitude": 71.876999, + "longitude": 79.322401, + "tags": [ + "pariatur", + "sit", + "culpa", + "dolore", + "cupidatat", + "minim", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Morgan Pittman" + }, + { + "id": 1, + "name": "Bullock Cannon" + }, + { + "id": 2, + "name": "Lakeisha Lynch" + } + ], + "greeting": "Hello, Sonia! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821b271c92236e5e9b0", + "index": 91, + "guid": "57cff12c-2a18-48c2-b15a-3c393de707b6", + "isActive": false, + "balance": "$1,874.97", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "green", + "name": { + "first": "Stephens", + "last": "Whitaker" + }, + "company": "EARTHPLEX", + "email": "stephens.whitaker@earthplex.us", + "phone": "+1 (960) 419-2183", + "address": "368 Division Avenue, Fivepointville, Colorado, 8609", + "about": "Do aliquip laboris irure consectetur esse reprehenderit. Cillum ex deserunt fugiat ut dolore excepteur culpa eiusmod sit ullamco velit consequat consequat aliquip. Nostrud officia est enim velit fugiat laboris.\r\n", + "registered": "Monday, May 26, 2014 8:04 PM", + "latitude": 7.153481, + "longitude": 100.823578, + "tags": [ + "laborum", + "irure", + "dolore", + "enim", + "nisi", + "cillum", + "deserunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Perkins Campos" + }, + { + "id": 1, + "name": "Murray Randall" + }, + { + "id": 2, + "name": "Wallace Blackwell" + } + ], + "greeting": "Hello, Stephens! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821c115ee94a728efa4", + "index": 92, + "guid": "7bbe7ace-73f9-4d25-8f20-b50f4b9d4e60", + "isActive": true, + "balance": "$1,168.18", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Shepard", + "last": "Sanchez" + }, + "company": "YOGASM", + "email": "shepard.sanchez@yogasm.ca", + "phone": "+1 (959) 436-3299", + "address": "154 Seeley Street, Witmer, Missouri, 994", + "about": "Adipisicing ut id nulla occaecat enim officia reprehenderit non magna dolor Lorem. Culpa ad proident duis cupidatat nostrud occaecat esse elit pariatur quis Lorem. Velit exercitation anim dolore nisi labore consequat cupidatat magna nostrud sint ut deserunt enim.\r\n", + "registered": "Saturday, February 22, 2014 10:23 PM", + "latitude": -81.429217, + "longitude": -27.374426, + "tags": [ + "aliqua", + "minim", + "consequat", + "aliqua", + "qui", + "proident", + "consequat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Sara Bruce" + }, + { + "id": 1, + "name": "Kimberley Mcdaniel" + }, + { + "id": 2, + "name": "Tammi England" + } + ], + "greeting": "Hello, Shepard! 
You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a094fdd2592addbd", + "index": 93, + "guid": "69bc8858-bd55-4fa0-95f9-d2cb680a390c", + "isActive": false, + "balance": "$1,746.96", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Kristy", + "last": "Johnston" + }, + "company": "KONGLE", + "email": "kristy.johnston@kongle.org", + "phone": "+1 (823) 558-3033", + "address": "122 Cadman Plaza, Suitland, Minnesota, 3918", + "about": "Laboris excepteur ea nostrud incididunt est laborum dolor. Consequat ad duis aute proident incididunt commodo adipisicing. Enim voluptate sunt et est excepteur eiusmod commodo. Mollit fugiat reprehenderit ex ullamco magna laboris commodo mollit. Cupidatat tempor tempor minim dolore. Excepteur ipsum esse ipsum nulla.\r\n", + "registered": "Wednesday, January 29, 2014 11:15 PM", + "latitude": 15.335329, + "longitude": -78.001472, + "tags": [ + "veniam", + "consequat", + "ex", + "anim", + "culpa", + "ullamco", + "ex" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Johns Cochran" + }, + { + "id": 1, + "name": "Sheppard Hicks" + }, + { + "id": 2, + "name": "Cervantes Donovan" + } + ], + "greeting": "Hello, Kristy! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821311b6242dc75f718", + "index": 94, + "guid": "9de76ab8-2ef7-4b46-a478-5dea8732650a", + "isActive": false, + "balance": "$2,549.05", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Melendez", + "last": "Golden" + }, + "company": "GRONK", + "email": "melendez.golden@gronk.biz", + "phone": "+1 (986) 553-2124", + "address": "647 Broadway , Reno, Northern Mariana Islands, 9487", + "about": "Aliquip pariatur deserunt culpa eu nostrud eu amet. Adipisicing elit occaecat aliqua ipsum ut. Cillum magna consectetur elit esse sint laboris duis. In ea enim aute eiusmod culpa. Labore quis sunt aliquip excepteur tempor irure amet consequat eu excepteur et occaecat.\r\n", + "registered": "Monday, April 14, 2014 9:41 AM", + "latitude": 55.489443, + "longitude": 3.620039, + "tags": [ + "pariatur", + "magna", + "quis", + "in", + "irure", + "amet", + "esse" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hilary Dennis" + }, + { + "id": 1, + "name": "Tyler Burgess" + }, + { + "id": 2, + "name": "Eugenia Donaldson" + } + ], + "greeting": "Hello, Melendez! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821c394a9871527c834", + "index": 95, + "guid": "ec0f31d4-7264-4649-9af9-2716082915d3", + "isActive": true, + "balance": "$2,810.24", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "brown", + "name": { + "first": "Eleanor", + "last": "Curtis" + }, + "company": "ZAPPIX", + "email": "eleanor.curtis@zappix.io", + "phone": "+1 (824) 461-3776", + "address": "981 Lyme Avenue, Oneida, Marshall Islands, 6015", + "about": "Commodo labore do pariatur exercitation voluptate ea velit velit qui ex duis. Commodo est excepteur ad labore aute enim eu. 
Adipisicing irure exercitation sint consectetur exercitation occaecat aute exercitation commodo.\r\n", + "registered": "Monday, May 5, 2014 7:51 AM", + "latitude": -5.604007, + "longitude": -125.532894, + "tags": [ + "aliqua", + "commodo", + "non", + "magna", + "quis", + "velit", + "irure" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Monica Carr" + }, + { + "id": 1, + "name": "Coleman Simpson" + }, + { + "id": 2, + "name": "Wilma Fisher" + } + ], + "greeting": "Hello, Eleanor! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821a5b5e35d24f265b3", + "index": 96, + "guid": "c865c0ff-8505-4d02-af0a-c5a275d03fd5", + "isActive": true, + "balance": "$3,192.24", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Terrell", + "last": "Greene" + }, + "company": "AUTOGRATE", + "email": "terrell.greene@autograte.net", + "phone": "+1 (933) 557-3825", + "address": "435 Bridgewater Street, Fresno, New Jersey, 8124", + "about": "Elit sit ut anim reprehenderit anim ipsum esse tempor aliqua id ullamco. Exercitation est velit aliquip est elit mollit magna velit. Elit eiusmod esse voluptate consectetur enim id exercitation adipisicing et laborum. Irure fugiat ut sint exercitation dolor nostrud qui mollit eiusmod cupidatat. Commodo ad dolore incididunt ex quis nostrud veniam pariatur aliquip ea reprehenderit reprehenderit.\r\n", + "registered": "Sunday, September 21, 2014 3:31 AM", + "latitude": 76.638623, + "longitude": 147.966829, + "tags": [ + "aliquip", + "qui", + "nisi", + "ut", + "non", + "proident", + "et" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lucile Jordan" + }, + { + "id": 1, + "name": "Bender Sheppard" + }, + { + "id": 2, + "name": "Milagros Francis" + } + ], + "greeting": "Hello, Terrell! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8216cc444257e522ec0", + "index": 97, + "guid": "90c19881-6521-4012-aba7-f4d143953965", + "isActive": false, + "balance": "$1,509.96", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Holman", + "last": "Hines" + }, + "company": "NORSUP", + "email": "holman.hines@norsup.com", + "phone": "+1 (925) 565-2825", + "address": "902 Monroe Street, Marienthal, Virgin Islands, 2354", + "about": "Eu cupidatat consectetur labore voluptate nulla non amet incididunt labore non magna tempor. Veniam et sint qui reprehenderit reprehenderit sint ut laboris elit. Pariatur in eu dolore culpa nisi laboris officia magna do velit. Cupidatat proident excepteur officia labore irure sunt elit velit dolor commodo. Minim quis sint minim non incididunt Lorem elit cupidatat adipisicing quis esse non sint et. Laborum culpa incididunt incididunt fugiat minim ex deserunt et.\r\n", + "registered": "Sunday, April 27, 2014 8:05 PM", + "latitude": 61.825518, + "longitude": -28.393161, + "tags": [ + "voluptate", + "veniam", + "Lorem", + "ex", + "in", + "est", + "exercitation" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Cecelia Chapman" + }, + { + "id": 1, + "name": "Isabella Castaneda" + }, + { + "id": 2, + "name": "Montoya Chen" + } + ], + "greeting": "Hello, Holman! 
You have 9 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210659aa5cdca41f7e", + "index": 98, + "guid": "81adf15d-24b6-454c-a75f-f5be12f6ac75", + "isActive": false, + "balance": "$2,564.95", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Horton", + "last": "Poole" + }, + "company": "GROK", + "email": "horton.poole@grok.biz", + "phone": "+1 (908) 520-3683", + "address": "141 Gatling Place, Hackneyville, Oregon, 2976", + "about": "Consequat nisi reprehenderit incididunt id minim cillum. Lorem reprehenderit fugiat irure dolor excepteur velit. Est nostrud culpa ut reprehenderit in duis id voluptate pariatur voluptate. Exercitation Lorem esse exercitation Lorem esse. Excepteur mollit sit ut voluptate ipsum pariatur anim sint sunt cillum sit consequat.\r\n", + "registered": "Thursday, September 18, 2014 6:11 AM", + "latitude": -3.525694, + "longitude": -103.351985, + "tags": [ + "deserunt", + "voluptate", + "cillum", + "id", + "magna", + "deserunt", + "incididunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hope Lara" + }, + { + "id": 1, + "name": "French Garner" + }, + { + "id": 2, + "name": "Stein Sykes" + } + ], + "greeting": "Hello, Horton! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821cf1b15f13d8c3938", + "index": 99, + "guid": "44533089-c11a-4656-b2d1-9ab6be887f30", + "isActive": false, + "balance": "$3,192.31", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "green", + "name": { + "first": "Wanda", + "last": "Wiggins" + }, + "company": "ZEPITOPE", + "email": "wanda.wiggins@zepitope.co.uk", + "phone": "+1 (998) 461-3780", + "address": "427 Canton Court, Heil, Utah, 8283", + "about": "Do officia et exercitation dolor esse. Ut nisi eiusmod dolore laborum ad ex mollit minim. Pariatur qui culpa ullamco ex eiusmod.\r\n", + "registered": "Tuesday, August 26, 2014 6:59 PM", + "latitude": 44.770809, + "longitude": 150.936963, + "tags": [ + "consectetur", + "mollit", + "laborum", + "ipsum", + "quis", + "cupidatat", + "in" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Kristie Cain" + }, + { + "id": 1, + "name": "Geraldine Zimmerman" + }, + { + "id": 2, + "name": "Ingrid Harper" + } + ], + "greeting": "Hello, Wanda! You have 5 unread messages.", + "favoriteFruit": "strawberry" + } +] \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore new file mode 100644 index 00000000000..b3d262958f8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/.gitignore @@ -0,0 +1 @@ +/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md new file mode 100644 index 00000000000..0a8448ce9f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -0,0 +1,87 @@ +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. + +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. 
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +THIS PACKAGE IS NOT CONSIDERED STABLE AND API OR ENCODING MAY CHANGE IN THE FUTURE. + +## News + + * Mar 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. | + + +As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. + +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. 
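For orientation while reading this vendored README, a minimal round-trip sketch of how the package is meant to be called may help. The Compress1X signature, the ErrIncompressible/ErrUseRLE values, and the Scratch/ReusePolicyAllow names all appear in the vendored compress.go further down in this diff; the exact ReadTable and Scratch.Decompress1X signatures used below are assumptions based on the README's description and may differ from the vendored version.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Input with many repeated byte values is a good fit for entropy coding.
	in := bytes.Repeat([]byte("abcabcabd"), 1000)

	// Reuse one Scratch across calls to avoid allocations; it also carries
	// the table re-use policy and any previously built table.
	var enc huff0.Scratch
	enc.Reuse = huff0.ReusePolicyAllow

	out, reused, err := huff0.Compress1X(in, &enc)
	switch err {
	case nil:
		fmt.Printf("compressed %d -> %d bytes (table reused: %v)\n", len(in), len(out), reused)
	case huff0.ErrIncompressible:
		log.Fatal("input judged too hard to compress; store it uncompressed")
	case huff0.ErrUseRLE:
		log.Fatal("input is a single repeated byte; store it as RLE instead")
	default:
		log.Fatal(err)
	}

	// The block carries no framing or checksums, so the caller must track the
	// compressed size (and whether the table was reused) itself.
	// Decoding (assumed signatures): ReadTable initializes the decoding table
	// and returns the data part of the block, which is fed to Decompress1X.
	var dec huff0.Scratch
	s2, data, err := huff0.ReadTable(out, &dec)
	if err != nil {
		log.Fatal(err)
	}
	got, err := s2.Decompress1X(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(got, in))
}
```

Note that this sketch only covers the case where a fresh table is emitted with the block (reused == false); as the README explains, when a table is reused the caller must skip ReadTable and supply the retained Scratch instead.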
+You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. + +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 00000000000..7d0903c7010 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,115 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + b.fill() + b.fill() + b.bitsRead += 8 - uint8(highBit32(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) peekBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // Do single re-slice to avoid bounds checks. 
+ v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000000..ec0c3fc53b5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,186 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + b.nBits += enc.nBits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. 
+// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 00000000000..50bcdf6ea99 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,54 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. 
+type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000000..9531d86df40 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,608 @@ +package huff0 + +import ( + "fmt" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. 
+ return nil, false, ErrIncompressible + } + + if s.Reuse == ReusePolicyPrefer && canReuse { + keepTable := s.cTable + s.cTable = s.prevTable + s.Out, err = compressor(in) + s.cTable = keepTable + if err == nil && len(s.Out) < len(in) { + s.OutData = s.Out + return s.Out, true, nil + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + s.optimalTableLog() + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= len(in) { + // Retain cTable even if we re-use. + keepTable := s.cTable + s.cTable = s.prevTable + s.Out, err = compressor(in) + s.cTable = keepTable + if len(s.Out) >= len(in) { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + s.cTable.write(s) + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= len(in) { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.cTable = s.cTable, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + if s.actualTableLog <= 8 { + n -= 4 + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encSymbol(cTable, tmp[3]) + bw.encSymbol(cTable, tmp[2]) + bw.encSymbol(cTable, tmp[1]) + bw.encSymbol(cTable, tmp[0]) + } + } else { + n -= 4 + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encSymbol(cTable, tmp[3]) + bw.encSymbol(cTable, tmp[2]) + bw.flush32() + bw.encSymbol(cTable, tmp[1]) + bw.encSymbol(cTable, tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + + s.huffSort() + s.cTable = s.cTable[:s.symbolLen] + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := int16(startNode) + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
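+	// huffNode[i] and huffNode0[i+1] alias the same element, so indexing through
+	// huffNode0 lets the parent-building loop below read position -1, which is
+	// the sentinel entry whose count is set to 1<<31 as a barrier.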
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [tableLogMax + 1]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + // TODO: changed `s.symbolLen` -> `nonNullRank+1` (micro-opt) + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within rank, symbol order + for n, val := range s.cTable[:s.symbolLen] { + v := valPerRank[val.nBits] + s.cTable[n].val = v + valPerRank[val.nBits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. 
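+	// Symbols are bucketed by the bit length of their count and insertion-sorted
+	// within each bucket, yielding the decreasing-count order that buildCTable
+	// relies on without a full sort of the 256-symbol alphabet.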
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + for n := 30; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } + return +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.TableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := uint8(maxNbBits) + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress_test.go b/vendor/github.com/klauspost/compress/huff0/compress_test.go new file mode 100644 index 00000000000..7cc3bb1efc4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress_test.go @@ -0,0 +1,437 @@ +package huff0 + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/klauspost/compress/flate" +) + +type inputFn func() ([]byte, error) + +var testfiles = []struct { + name string + fn inputFn + err1X error + err4X error +}{ + // Digits is the digits of the irrational number e. Its decimal representation + // does not repeat, but there are only 10 possible digits, so it should be + // reasonably compressible. + {name: "digits", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/e.txt") }}, + // gettysburg.txt is a small plain text. + {name: "gettysburg", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/gettysburg.txt") }}, + // Twain is Project Gutenberg's edition of Mark Twain's classic English novel. 
+ {name: "twain", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/Mark.Twain-Tom.Sawyer.txt") }}, + // Random bytes + {name: "random", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/sharnd.out") }, err1X: ErrIncompressible, err4X: ErrIncompressible}, + // Low entropy + {name: "low-ent.10k", fn: func() ([]byte, error) { return []byte(strings.Repeat("1221", 10000)), nil }}, + // Super Low entropy + {name: "superlow-ent-10k", fn: func() ([]byte, error) { return []byte(strings.Repeat("1", 10000) + strings.Repeat("2", 500)), nil }}, + // Zero bytes + {name: "zeroes", fn: func() ([]byte, error) { return make([]byte, 10000), nil }, err1X: ErrUseRLE, err4X: ErrUseRLE}, + {name: "crash1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash1.bin") }, err1X: ErrIncompressible, err4X: ErrIncompressible}, + {name: "crash2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash2.bin") }, err4X: ErrIncompressible}, + {name: "crash3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/crash3.bin") }, err1X: ErrIncompressible, err4X: ErrIncompressible}, + {name: "endzerobits", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/endzerobits.bin") }, err1X: nil, err4X: ErrIncompressible}, + {name: "endnonzero", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/endnonzero.bin") }, err4X: ErrIncompressible}, + {name: "case1", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case1.bin") }, err1X: nil}, + {name: "case2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case2.bin") }, err1X: nil}, + {name: "case3", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/case3.bin") }, err1X: nil}, + {name: "pngdata.001", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/pngdata.bin") }, err1X: nil}, + {name: "normcount2", fn: func() ([]byte, error) { return ioutil.ReadFile("../testdata/normcount2.bin") }, err1X: nil}, +} + +type fuzzInput struct { + name string + fn inputFn +} + +var testfilesExtended []fuzzInput + +func init() { + filepath.Walk("./fuzz/compress/corpus", func(path string, info os.FileInfo, err error) error { + if info.Size() == 0 || info.IsDir() { + return nil + } + testfilesExtended = append(testfilesExtended, fuzzInput{ + name: filepath.Base(path), + fn: func() ([]byte, error) { + return ioutil.ReadFile(path) + }, + }) + return nil + }) +} + +func TestCompress1X(t *testing.T) { + for _, test := range testfiles { + t.Run(test.name, func(t *testing.T) { + var s Scratch + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b, re, err := Compress1X(buf0, &s) + if err != test.err1X { + t.Errorf("want error %v (%T), got %v (%T)", test.err1X, test.err1X, err, err) + } + if err != nil { + t.Log(test.name, err.Error()) + return + } + if b == nil { + t.Error("got no output") + return + } + min := s.minSize(len(buf0)) + if len(s.OutData) < min { + t.Errorf("output data length (%d) below shannon limit (%d)", len(s.OutData), min) + } + if len(s.OutTable) == 0 { + t.Error("got no table definition") + } + if re { + t.Error("claimed to have re-used.") + } + if len(s.OutData) == 0 { + t.Error("got no data output") + } + t.Logf("%s: %d -> %d bytes (%.2f:1) re:%t (table: %d bytes)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)), re, len(s.OutTable)) + s.Out = nil + bRe, _, err := Compress1X(b, &s) + if err == nil { + t.Log("Could re-compress to", 
len(bRe)) + } + }) + } +} + +func TestCompress4X(t *testing.T) { + for _, test := range testfiles { + t.Run(test.name, func(t *testing.T) { + var s Scratch + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b, re, err := Compress4X(buf0, &s) + if err != test.err4X { + t.Errorf("want error %v (%T), got %v (%T)", test.err1X, test.err4X, err, err) + } + if err != nil { + t.Log(test.name, err.Error()) + return + } + if b == nil { + t.Error("got no output") + return + } + if len(s.OutTable) == 0 { + t.Error("got no table definition") + } + if re { + t.Error("claimed to have re-used.") + } + if len(s.OutData) == 0 { + t.Error("got no data output") + } + + t.Logf("%s: %d -> %d bytes (%.2f:1) %t (table: %d bytes)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)), re, len(s.OutTable)) + }) + } +} + +func TestCompress1XReuse(t *testing.T) { + for _, test := range testfiles { + t.Run(test.name, func(t *testing.T) { + var s Scratch + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b, re, err := Compress1X(buf0, &s) + if err != test.err1X { + t.Errorf("want error %v (%T), got %v (%T)", test.err1X, test.err1X, err, err) + } + if err != nil { + t.Log(test.name, err.Error()) + return + } + if b == nil { + t.Error("got no output") + return + } + firstData := len(s.OutData) + s.Reuse = ReusePolicyAllow + b, re, err = Compress1X(buf0, &s) + if err != nil { + t.Errorf("got secondary error %v (%T)", err, err) + return + } + if !re { + t.Error("Didn't re-use even if data was the same") + } + if len(s.OutTable) != 0 { + t.Error("got table definition, don't want any") + } + if len(s.OutData) == 0 { + t.Error("got no data output") + } + if len(b) != firstData { + t.Errorf("data length did not match first: %d, second:%d", firstData, len(b)) + } + t.Logf("%s: %d -> %d bytes (%.2f:1) %t", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)), re) + }) + } +} + +func BenchmarkDeflate(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err1X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + dec, err := flate.NewWriter(ioutil.Discard, flate.HuffmanOnly) + if err != nil { + b.Fatal(err) + } + if test.err1X != nil { + b.Skip("skipping") + } + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + dec.Reset(ioutil.Discard) + n, err := dec.Write(buf0) + if err != nil { + b.Fatal(err) + } + if n != len(buf0) { + b.Fatal("mismatch", n, len(buf0)) + } + dec.Close() + } + }) + } +} + +func BenchmarkCompress1XReuseNone(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err1X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s Scratch + s.Reuse = ReusePolicyNone + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + _, re, err := Compress1X(buf0, &s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, re, _ = Compress1X(buf0, &s) + if re { + b.Fatal("reused") + } + } + }) + } +} + +func BenchmarkCompress1XReuseAllow(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err1X != nil { + continue + } + 
b.Run(test.name, func(b *testing.B) { + var s Scratch + s.Reuse = ReusePolicyAllow + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + _, re, err := Compress1X(buf0, &s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, re, _ = Compress1X(buf0, &s) + if !re { + b.Fatal("not reused") + } + } + }) + } +} + +func BenchmarkCompress1XReusePrefer(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err1X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s Scratch + s.Reuse = ReusePolicyPrefer + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + _, re, err := Compress1X(buf0, &s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, re, _ = Compress1X(buf0, &s) + if !re { + b.Fatal("not reused") + } + } + }) + } +} + +func BenchmarkCompress4XReuseNone(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err4X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s Scratch + s.Reuse = ReusePolicyNone + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + _, re, err := Compress4X(buf0, &s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, re, _ = Compress4X(buf0, &s) + if re { + b.Fatal("reused") + } + } + }) + } +} + +func BenchmarkCompress4XReuseAllow(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err4X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s Scratch + s.Reuse = ReusePolicyAllow + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + _, re, err := Compress4X(buf0, &s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, re, _ = Compress4X(buf0, &s) + if !re { + b.Fatal("not reused") + } + } + }) + } +} + +func BenchmarkCompress4XReusePrefer(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err4X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s Scratch + s.Reuse = ReusePolicyPrefer + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + _, re, err := Compress4X(buf0, &s) + if err != test.err4X { + b.Fatal("unexpected error:", err) + } + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, re, _ = Compress4X(buf0, &s) + if !re { + b.Fatal("not reused") + } + } + }) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000000..b6ede8b5588 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,394 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { 
+ byte uint8 + nBits uint8 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for decoding input using this table. +func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) <= int(iSize) { + return s, nil, errors.New("input too small for table") + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [tableLogMax + 1]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + rankStats[v]++ + weightTotal += (1 << (v & 15)) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + + for n, w := range s.huffWeight[:s.symbolLen] { + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + byte: uint8(n), + nBits: s.actualTableLog + 1 - w, + } + for u := rankStats[w]; u < rankStats[w]+length; u++ { + s.dt.single[u] = d + } + rankStats[w] += length + } + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. 
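+// Each symbol is decoded by indexing the table with the next actualTableLog
+// bits of the stream. A minimal round-trip sketch (error handling and the
+// ErrUseRLE/ErrIncompressible outcomes elided; data is the raw input block):
+//
+//	var sc huff0.Scratch
+//	comp, _, err := huff0.Compress1X(data, &sc)
+//	dec, remain, err := huff0.ReadTable(comp, nil)
+//	out, err := dec.Decompress1X(remain)
+//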
+// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if len(s.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReader + err = br.init(in) + if err != nil { + return nil, err + } + s.Out = s.Out[:0] + + decode := func() byte { + val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */ + v := s.dt.single[val] + br.bitsRead += v.nBits + return v.byte + } + hasDec := func(v dEntrySingle) byte { + br.bitsRead += v.nBits + return v.byte + } + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := s.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.huffWeight[:256] + var off uint8 + + for br.off >= 8 { + br.fillFast() + tmp[off+0] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) + tmp[off+1] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) + br.fillFast() + tmp[off+2] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) + tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + } + } + + s.Out = append(s.Out, tmp[:off]...) + + for !br.finished() { + br.fill() + s.Out = append(s.Out, decode()) + } + return s.Out, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if len(s.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(in) < 6+(4*1) { + return nil, errors.New("input too small") + } + // TODO: We do not detect when we overrun a buffer, except if the last one does. + + var br [4]bitReader + start := 6 + for i := 0; i < 3; i++ { + length := int(in[i*2]) | (int(in[i*2+1]) << 8) + if start+length >= len(in) { + return nil, errors.New("truncated input (or invalid offset)") + } + err = br[i].init(in[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err = br[3].init(in[start:]) + if err != nil { + return nil, err + } + + // Prepare output + if cap(s.Out) < dstSize { + s.Out = make([]byte, 0, dstSize) + } + s.Out = s.Out[:dstSize] + // destination, offset to match first output + dstOut := s.Out + dstEvery := (dstSize + 3) / 4 + + decode := func(br *bitReader) byte { + val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */ + v := s.dt.single[val] + br.bitsRead += v.nBits + return v.byte + } + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.huffWeight[:256] + var off uint8 + + // Decode 2 values from each decoder/loop. 
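+	// The four streams are decoded in lockstep into quarters of the 256-byte
+	// scratch buffer; whenever a quarter (bufoff bytes per stream) fills up it
+	// is copied out to the four destination segments of size dstEvery.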
+ const bufoff = 256 / 4 +bigloop: + for { + for i := range br { + if br[i].off < 4 { + break bigloop + } + br[i].fillFast() + } + tmp[off] = decode(&br[0]) + tmp[off+bufoff] = decode(&br[1]) + tmp[off+bufoff*2] = decode(&br[2]) + tmp[off+bufoff*3] = decode(&br[3]) + tmp[off+1] = decode(&br[0]) + tmp[off+1+bufoff] = decode(&br[1]) + tmp[off+1+bufoff*2] = decode(&br[2]) + tmp[off+1+bufoff*3] = decode(&br[3]) + off += 2 + if off == bufoff { + copy(dstOut, tmp[:bufoff]) + copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2]) + copy(dstOut[dstEvery*2:], tmp[bufoff*2:bufoff*3]) + copy(dstOut[dstEvery*3:], tmp[bufoff*3:bufoff*4]) + off = 0 + dstOut = dstOut[bufoff:] + // There must at least be 3 buffers left. + if len(dstOut) < dstEvery*3+3 { + return nil, errors.New("corruption detected: stream overrun") + } + } + } + if off > 0 { + ioff := int(off) + if len(dstOut) < dstEvery*3-ioff { + return nil, errors.New("corruption detected: stream overrun") + } + copy(dstOut, tmp[:off]) + copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2]) + copy(dstOut[dstEvery*2:dstEvery*2+ioff], tmp[bufoff*2:bufoff*3]) + copy(dstOut[dstEvery*3:dstEvery*3+ioff], tmp[bufoff*3:bufoff*4]) + dstOut = dstOut[off:] + } + + for i := range br { + offset := dstEvery * i + br := &br[i] + for !br.finished() { + br.fill() + if offset >= len(dstOut) { + return nil, errors.New("corruption detected: stream overrun") + } + dstOut[offset] = decode(br) + offset++ + } + err = br.close() + if err != nil { + return nil, err + } + } + + return s.Out, nil +} + +// matches will compare a decoding table to a coding table. +// Errors are written to the writer. +// Nothing will be written if table is ok. +func (s *Scratch) matches(ct cTable, w io.Writer) { + if s == nil || len(s.dt.single) == 0 { + return + } + dt := s.dt.single[:1< 0 { + fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + continue + } + // Ensure that all combinations are covered. 
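+	// A symbol encoded with enc.nBits owns 1<<(tableLog-enc.nBits) consecutive
+	// decoder slots; every slot in that range must decode to the same symbol
+	// with the same bit count.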
+ for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if dec.nBits != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, dec.nBits) + errs++ + } + if dec.byte != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, dec.byte) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errros, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_test.go b/vendor/github.com/klauspost/compress/huff0/decompress_test.go new file mode 100644 index 00000000000..579ab399cd5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_test.go @@ -0,0 +1,482 @@ +package huff0 + +import ( + "bytes" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestDecompress1X(t *testing.T) { + for _, test := range testfiles { + t.Run(test.name, func(t *testing.T) { + var s = &Scratch{} + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b, re, err := Compress1X(buf0, s) + if err != test.err1X { + t.Errorf("want error %v (%T), got %v (%T)", test.err1X, test.err1X, err, err) + } + if err != nil { + t.Log(test.name, err.Error()) + return + } + if b == nil { + t.Error("got no output") + return + } + if len(s.OutTable) == 0 { + t.Error("got no table definition") + } + if re { + t.Error("claimed to have re-used.") + } + if len(s.OutData) == 0 { + t.Error("got no data output") + } + + wantRemain := len(s.OutData) + t.Logf("%s: %d -> %d bytes (%.2f:1) %t (table: %d bytes)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)), re, len(s.OutTable)) + + s.Out = nil + var remain []byte + s, remain, err = ReadTable(b, s) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + if s.matches(s.prevTable, &buf); buf.Len() > 0 { + t.Error(buf.String()) + } + if len(remain) != wantRemain { + t.Fatalf("remain mismatch, want %d, got %d bytes", wantRemain, len(remain)) + } + t.Logf("remain: %d bytes, ok", len(remain)) + dc, err := s.Decompress1X(remain) + if err != nil { + t.Error(err) + return + } + if len(buf0) != len(dc) { + t.Errorf(test.name+"decompressed, want size: %d, got %d", len(buf0), len(dc)) + if len(buf0) > len(dc) { + buf0 = buf0[:len(dc)] + } else { + dc = dc[:len(buf0)] + } + if !cmp.Equal(buf0, dc) { + if len(dc) > 1024 { + t.Log(string(dc[:1024])) + t.Errorf(test.name+"decompressed, got delta: \n(in)\t%02x !=\n(out)\t%02x\n", buf0[:1024], dc[:1024]) + } else { + t.Log(string(dc)) + t.Errorf(test.name+"decompressed, got delta: (in) %v != (out) %v\n", buf0, dc) + } + } + return + } + if !cmp.Equal(buf0, dc) { + if len(buf0) > 1024 { + t.Log(string(dc[:1024])) + } else { + t.Log(string(dc)) + } + //t.Errorf(test.name+": decompressed, got delta: \n%s") + t.Errorf(test.name + ": decompressed, got delta") + } + if !t.Failed() { + t.Log("... 
roundtrip ok!") + } + }) + } +} + +func TestDecompress4X(t *testing.T) { + for _, test := range testfiles { + t.Run(test.name, func(t *testing.T) { + var s = &Scratch{} + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b, re, err := Compress4X(buf0, s) + if err != test.err4X { + t.Errorf("want error %v (%T), got %v (%T)", test.err1X, test.err1X, err, err) + } + if err != nil { + t.Log(test.name, err.Error()) + return + } + if b == nil { + t.Error("got no output") + return + } + if len(s.OutTable) == 0 { + t.Error("got no table definition") + } + if re { + t.Error("claimed to have re-used.") + } + if len(s.OutData) == 0 { + t.Error("got no data output") + } + + wantRemain := len(s.OutData) + t.Logf("%s: %d -> %d bytes (%.2f:1) %t (table: %d bytes)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)), re, len(s.OutTable)) + + s.Out = nil + var remain []byte + s, remain, err = ReadTable(b, s) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + if s.matches(s.prevTable, &buf); buf.Len() > 0 { + t.Error(buf.String()) + } + if len(remain) != wantRemain { + t.Fatalf("remain mismatch, want %d, got %d bytes", wantRemain, len(remain)) + } + t.Logf("remain: %d bytes, ok", len(remain)) + dc, err := s.Decompress4X(remain, len(buf0)) + if err != nil { + t.Error(err) + return + } + if len(buf0) != len(dc) { + t.Errorf(test.name+"decompressed, want size: %d, got %d", len(buf0), len(dc)) + if len(buf0) > len(dc) { + buf0 = buf0[:len(dc)] + } else { + dc = dc[:len(buf0)] + } + if !cmp.Equal(buf0, dc) { + if len(dc) > 1024 { + t.Log(string(dc[:1024])) + t.Errorf(test.name+"decompressed, got delta: \n(in)\t%02x !=\n(out)\t%02x\n", buf0[:1024], dc[:1024]) + } else { + t.Log(string(dc)) + t.Errorf(test.name+"decompressed, got delta: (in) %v != (out) %v\n", buf0, dc) + } + } + return + } + if !cmp.Equal(buf0, dc) { + if len(buf0) > 1024 { + t.Log(string(dc[:1024])) + } else { + t.Log(string(dc)) + } + //t.Errorf(test.name+": decompressed, got delta: \n%s") + t.Errorf(test.name + ": decompressed, got delta") + } + if !t.Failed() { + t.Log("... 
roundtrip ok!") + } + }) + } +} + +func TestDecompress1XFuzz(t *testing.T) { + for _, test := range testfilesExtended { + t.Run(test.name, func(t *testing.T) { + var s = &Scratch{} + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b, re, err := Compress1X(buf0, s) + if err != nil { + if err == ErrIncompressible || err == ErrUseRLE || err == ErrTooBig { + t.Log(test.name, err.Error()) + return + } + t.Error(test.name, err.Error()) + return + } + if b == nil { + t.Error("got no output") + return + } + if len(s.OutTable) == 0 { + t.Error("got no table definition") + } + if re { + t.Error("claimed to have re-used.") + } + if len(s.OutData) == 0 { + t.Error("got no data output") + } + + wantRemain := len(s.OutData) + t.Logf("%s: %d -> %d bytes (%.2f:1) %t (table: %d bytes)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)), re, len(s.OutTable)) + + s.Out = nil + var remain []byte + s, remain, err = ReadTable(b, s) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + if s.matches(s.prevTable, &buf); buf.Len() > 0 { + t.Error(buf.String()) + } + if len(remain) != wantRemain { + t.Fatalf("remain mismatch, want %d, got %d bytes", wantRemain, len(remain)) + } + t.Logf("remain: %d bytes, ok", len(remain)) + dc, err := s.Decompress1X(remain) + if err != nil { + t.Error(err) + return + } + if len(buf0) != len(dc) { + t.Errorf(test.name+"decompressed, want size: %d, got %d", len(buf0), len(dc)) + if len(buf0) > len(dc) { + buf0 = buf0[:len(dc)] + } else { + dc = dc[:len(buf0)] + } + if !cmp.Equal(buf0, dc) { + if len(dc) > 1024 { + t.Log(string(dc[:1024])) + t.Errorf(test.name+"decompressed, got delta: \n(in)\t%02x !=\n(out)\t%02x\n", buf0[:1024], dc[:1024]) + } else { + t.Log(string(dc)) + t.Errorf(test.name+"decompressed, got delta: (in) %v != (out) %v\n", buf0, dc) + } + } + return + } + if !cmp.Equal(buf0, dc) { + if len(buf0) > 1024 { + t.Log(string(dc[:1024])) + } else { + t.Log(string(dc)) + } + //t.Errorf(test.name+": decompressed, got delta: \n%s") + t.Errorf(test.name + ": decompressed, got delta") + } + if !t.Failed() { + t.Log("... 
roundtrip ok!") + } + }) + } +} + +func TestDecompress4XFuzz(t *testing.T) { + for _, test := range testfilesExtended { + t.Run(test.name, func(t *testing.T) { + var s = &Scratch{} + buf0, err := test.fn() + if err != nil { + t.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + b, re, err := Compress4X(buf0, s) + if err != nil { + if err == ErrIncompressible || err == ErrUseRLE || err == ErrTooBig { + t.Log(test.name, err.Error()) + return + } + t.Error(test.name, err.Error()) + return + } + if b == nil { + t.Error("got no output") + return + } + if len(s.OutTable) == 0 { + t.Error("got no table definition") + } + if re { + t.Error("claimed to have re-used.") + } + if len(s.OutData) == 0 { + t.Error("got no data output") + } + + wantRemain := len(s.OutData) + t.Logf("%s: %d -> %d bytes (%.2f:1) %t (table: %d bytes)", test.name, len(buf0), len(b), float64(len(buf0))/float64(len(b)), re, len(s.OutTable)) + + s.Out = nil + var remain []byte + s, remain, err = ReadTable(b, s) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + if s.matches(s.prevTable, &buf); buf.Len() > 0 { + t.Error(buf.String()) + } + if len(remain) != wantRemain { + t.Fatalf("remain mismatch, want %d, got %d bytes", wantRemain, len(remain)) + } + t.Logf("remain: %d bytes, ok", len(remain)) + dc, err := s.Decompress4X(remain, len(buf0)) + if err != nil { + t.Error(err) + return + } + if len(buf0) != len(dc) { + t.Errorf(test.name+"decompressed, want size: %d, got %d", len(buf0), len(dc)) + if len(buf0) > len(dc) { + buf0 = buf0[:len(dc)] + } else { + dc = dc[:len(buf0)] + } + if !cmp.Equal(buf0, dc) { + if len(dc) > 1024 { + t.Log(string(dc[:1024])) + t.Errorf(test.name+"decompressed, got delta: \n(in)\t%02x !=\n(out)\t%02x\n", buf0[:1024], dc[:1024]) + } else { + t.Log(string(dc)) + t.Errorf(test.name+"decompressed, got delta: (in) %v != (out) %v\n", buf0, dc) + } + } + return + } + if !cmp.Equal(buf0, dc) { + if len(buf0) > 1024 { + t.Log(string(dc[:1024])) + } else { + t.Log(string(dc)) + } + //t.Errorf(test.name+": decompressed, got delta: \n%s") + t.Errorf(test.name + ": decompressed, got delta") + } + if !t.Failed() { + t.Log("... 
roundtrip ok!") + } + }) + } +} + +func BenchmarkDecompress1XTable(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err1X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s = &Scratch{} + s.Reuse = ReusePolicyNone + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + compressed, _, err := Compress1X(buf0, s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + s.Out = nil + s, remain, _ := ReadTable(compressed, s) + s.Decompress1X(remain) + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + s, remain, err := ReadTable(compressed, s) + if err != nil { + b.Fatal(err) + } + _, err = s.Decompress1X(remain) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +func BenchmarkDecompress1XNoTable(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err1X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s = &Scratch{} + s.Reuse = ReusePolicyNone + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + compressed, _, err := Compress1X(buf0, s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + s.Out = nil + s, remain, _ := ReadTable(compressed, s) + s.Decompress1X(remain) + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, err = s.Decompress1X(remain) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +func BenchmarkDecompress4XNoTable(b *testing.B) { + for _, tt := range testfiles { + test := tt + if test.err4X != nil { + continue + } + b.Run(test.name, func(b *testing.B) { + var s = &Scratch{} + s.Reuse = ReusePolicyNone + buf0, err := test.fn() + if err != nil { + b.Fatal(err) + } + if len(buf0) > BlockSizeMax { + buf0 = buf0[:BlockSizeMax] + } + compressed, _, err := Compress4X(buf0, s) + if err != test.err1X { + b.Fatal("unexpected error:", err) + } + s.Out = nil + s, remain, _ := ReadTable(compressed, s) + s.Decompress4X(remain, len(buf0)) + b.ResetTimer() + b.ReportAllocs() + b.SetBytes(int64(len(buf0))) + for i := 0; i < b.N; i++ { + _, err = s.Decompress4X(remain, len(buf0)) + if err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/01ace8efdfd46d82b0fef511b259933ced9d13ec-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/01ace8efdfd46d82b0fef511b259933ced9d13ec-2 new file mode 100644 index 00000000000..69291ec93e9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/01ace8efdfd46d82b0fef511b259933ced9d13ec-2 @@ -0,0 +1 @@ +012939495969798997C2 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/049f4a878ab48c404bd0950e2366e3c0eaad4318-3 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/049f4a878ab48c404bd0950e2366e3c0eaad4318-3 new file mode 100644 index 00000000000..84e824506e3 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/049f4a878ab48c404bd0950e2366e3c0eaad4318-3 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/04bc4648208f40ba61ffb9a929f760c24101522c-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/04bc4648208f40ba61ffb9a929f760c24101522c-1 new file mode 100644 index 00000000000..3494057c2ee Binary files /dev/null and 
b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/04bc4648208f40ba61ffb9a929f760c24101522c-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0a7ce59229caf93ee64d5a828aecab7b5e434bbc b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0a7ce59229caf93ee64d5a828aecab7b5e434bbc new file mode 100644 index 00000000000..80da8c0d0b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0a7ce59229caf93ee64d5a828aecab7b5e434bbc @@ -0,0 +1 @@ +CaDCaDcAecDc7De7777-accD77DeDEEAf_ZohDnpPnXJKmFtxmGZFOVgbxbCaAEEAAfecbD4dc44.x.xAfF-45405050xdbCaDAEEd0EcdDA7AfecbD4dc454057025Vz-04454224054e-0276666767011776664.efZohQpggnpGPTrniXRqJlKmFtmGZFOgVMgwxAAfeB643xbBCaBABE3BEcd3AAfecbD64Bdc664423703235e-07503-0xB66C9fad.-75946554523692857652911-i1V___hoNaqWTStRM9101862645149230957031250xdbBC2aBDABEEd73801BEcdD3A7AfecbD864Bdc1862645149230957031259101862645149230957031250xdbBC2aBDABEEd73801BEcdD3A7AfecbD864Bdc186264514923095703125\B___L7D3PS0hd1_Gxkg_Eg2P_Uo_3yd3gU_a_2__Ct28_l1w16r4_p_Q898Fv_22_6_28282357FECfCFCdBEEaE47fb5Bc{1.0xd-0534420.-8911368683772161602973534420.-89113686<-7301351713.7376291319585350 [ \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0ceda9053c751bf974334c7c1676832feea92d69 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0ceda9053c751bf974334c7c1676832feea92d69 new file mode 100644 index 00000000000..9e7e94e2929 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0ceda9053c751bf974334c7c1676832feea92d69 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0dde84d337ceb9269489c3919d205ea9789c158a-5 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0dde84d337ceb9269489c3919d205ea9789c158a-5 new file mode 100644 index 00000000000..1382437f3c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0dde84d337ceb9269489c3919d205ea9789c158a-5 @@ -0,0 +1 @@ + 01293949596979899; \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0e90b9893889f69194ca72e28082a1f6081c4b35-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0e90b9893889f69194ca72e28082a1f6081c4b35-1 new file mode 100644 index 00000000000..0787dae060b --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0e90b9893889f69194ca72e28082a1f6081c4b35-1 @@ -0,0 +1 @@ +-08338832110233882812 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0fcf0d8fa929e950029b365ad631e876ec5f3c47-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0fcf0d8fa929e950029b365ad631e876ec5f3c47-1 new file mode 100644 index 00000000000..159b67b2a02 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/0fcf0d8fa929e950029b365ad631e876ec5f3c47-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/112b9d35e8cbeabce6fda543aec804ab83185543-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/112b9d35e8cbeabce6fda543aec804ab83185543-1 new file mode 100644 index 00000000000..9072d15df08 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/112b9d35e8cbeabce6fda543aec804ab83185543-1 @@ -0,0 +1 @@ +8869fecbD864Bdc186264514923095703125-70989128513186264514923095703125 \ No newline at end of file diff --git 
a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/130efc33e0d8f0327a73110deac85474bc91dc8d-3 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/130efc33e0d8f0327a73110deac85474bc91dc8d-3 new file mode 100644 index 00000000000..82cbc78af24 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/130efc33e0d8f0327a73110deac85474bc91dc8d-3 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/1eb38d67be98350dd6ce98f72e5b1b75043ce8c7 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/1eb38d67be98350dd6ce98f72e5b1b75043ce8c7 new file mode 100644 index 00000000000..7a5facee042 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/1eb38d67be98350dd6ce98f72e5b1b75043ce8c7 @@ -0,0 +1 @@ +xhy_1P_X_Bers6vz3zZ_95_J_DSW6IkV4_woYf___aC7_p1_1gED_4_Q_28_T73eUL053440.-136868- \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/234ec5ad1fc105fcc08523ff91bd8430284aa1f3 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/234ec5ad1fc105fcc08523ff91bd8430284aa1f3 new file mode 100644 index 00000000000..684a4432be8 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/234ec5ad1fc105fcc08523ff91bd8430284aa1f3 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/26ccc59f9c4d89063210f2e8ddd0c26394c2a607-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/26ccc59f9c4d89063210f2e8ddd0c26394c2a607-2 new file mode 100644 index 00000000000..20debe4dd86 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/26ccc59f9c4d89063210f2e8ddd0c26394c2a607-2 @@ -0,0 +1 @@ +012756789aefxbcdefx \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/287d18afe5e0d54d4554a39aa2acff99d696136d b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/287d18afe5e0d54d4554a39aa2acff99d696136d new file mode 100644 index 00000000000..299c56cf463 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/287d18afe5e0d54d4554a39aa2acff99d696136d @@ -0,0 +1 @@ +8869-0x0e.-9101862645149230957031250xdbBC2aBDABEEd73801BEcdD3A7AfecbD864Bdc186264514923095703125-70989128513186264514923095703125 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/29e95908e0f3e9f3e189441de6ed797b7bee2449-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/29e95908e0f3e9f3e189441de6ed797b7bee2449-2 new file mode 100644 index 00000000000..16749a6bf95 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/29e95908e0f3e9f3e189441de6ed797b7bee2449-2 @@ -0,0 +1 @@ +0345678defghijklmnopqrstuvwxyz18872161F029739CFeBEbaE7fb-05340.-13686 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2c93d4381a547aa7001acfffcf51dc323a0cdf2b-10 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2c93d4381a547aa7001acfffcf51dc323a0cdf2b-10 new file mode 100644 index 00000000000..02700fd0782 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2c93d4381a547aa7001acfffcf51dc323a0cdf2b-10 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2e0d10ca05bdebe3fe84c481004adb11a1cc761c-2 
b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2e0d10ca05bdebe3fe84c481004adb11a1cc761c-2 new file mode 100644 index 00000000000..001954fdd73 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2e0d10ca05bdebe3fe84c481004adb11a1cc761c-2 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2e85ccbaa127cbf77bb8f0b5c3afdcf53f63f2d8-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2e85ccbaa127cbf77bb8f0b5c3afdcf53f63f2d8-2 new file mode 100644 index 00000000000..e7c19ad9797 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/2e85ccbaa127cbf77bb8f0b5c3afdcf53f63f2d8-2 @@ -0,0 +1 @@ +"4656612873077392578125 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/30632e96a06600a75b47b4cbe0db74b959d09aa5 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/30632e96a06600a75b47b4cbe0db74b959d09aa5 new file mode 100644 index 00000000000..4529b55e3b6 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/30632e96a06600a75b47b4cbe0db74b959d09aa5 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/308f53f64ce26d56e4379931ff3611a7dd49325f-10 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/308f53f64ce26d56e4379931ff3611a7dd49325f-10 new file mode 100644 index 00000000000..ec62c5ce57b Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/308f53f64ce26d56e4379931ff3611a7dd49325f-10 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/31e020e2f79260ddcbf377c8e1256288b57d32b8-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/31e020e2f79260ddcbf377c8e1256288b57d32b8-1 new file mode 100644 index 00000000000..2e05aff4ae7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/31e020e2f79260ddcbf377c8e1256288b57d32b8-1 @@ -0,0 +1 @@ +186264514923095703125 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/32a3cf4aef238d77278831c3744c1327dcc8f8c0-3 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/32a3cf4aef238d77278831c3744c1327dcc8f8c0-3 new file mode 100644 index 00000000000..aa7650d5ede Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/32a3cf4aef238d77278831c3744c1327dcc8f8c0-3 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/350b061ff78cfb97b28716ec96e27f616d439f97-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/350b061ff78cfb97b28716ec96e27f616d439f97-1 new file mode 100644 index 00000000000..a502d68ede6 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/350b061ff78cfb97b28716ec96e27f616d439f97-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3542214eb977ea9bdabcc55601d7876b6227b501-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3542214eb977ea9bdabcc55601d7876b6227b501-2 new file mode 100644 index 00000000000..800763fc7a2 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3542214eb977ea9bdabcc55601d7876b6227b501-2 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3693a58b98ececb7670844f49caa795c1735ddf0-1 
b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3693a58b98ececb7670844f49caa795c1735ddf0-1 new file mode 100644 index 00000000000..51136ddc013 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3693a58b98ececb7670844f49caa795c1735ddf0-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3b1d88a86a30b301e5f509a756f43ef34fad55b0-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3b1d88a86a30b301e5f509a756f43ef34fad55b0-2 new file mode 100644 index 00000000000..edf320c82b1 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3b1d88a86a30b301e5f509a756f43ef34fad55b0-2 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3f85a2df398429128e89cb13f51f612645550408-11 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3f85a2df398429128e89cb13f51f612645550408-11 new file mode 100644 index 00000000000..7da12c9097a Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/3f85a2df398429128e89cb13f51f612645550408-11 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/40b8de701a171fddefdc4459023121a1e605b16c b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/40b8de701a171fddefdc4459023121a1e605b16c new file mode 100644 index 00000000000..ac5355de43c --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/40b8de701a171fddefdc4459023121a1e605b16c @@ -0,0 +1 @@ +0aaef11119910xC8fFCFCEEaEfb.144-0440.-89116868771616097937988815-9101862645149230957031250xbCaBEBEDAfebDBdc32-xcA3.7475562603-0x0e0862645149230957031250xdbBC2aBDABEEd73801BEcdD3A7AfecbD864Bdc186264514923095703125|7318e000175420 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4106d42682c6b5f9f068148a3d94f5b52d9a2862-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4106d42682c6b5f9f068148a3d94f5b52d9a2862-1 new file mode 100644 index 00000000000..bf6dcb36149 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4106d42682c6b5f9f068148a3d94f5b52d9a2862-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/43726b29460c5060782f19cb0facac8443ad5d0b b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/43726b29460c5060782f19cb0facac8443ad5d0b new file mode 100644 index 00000000000..cce03e6dfb3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/43726b29460c5060782f19cb0facac8443ad5d0b @@ -0,0 +1,2 @@ +*? 
| " +%`Rw_Y} [QQ.=Rtw ~Pk.Q_k__k__sb_QRif_L_SrsX6i_____UmUzjWyiPsmrgXsPltOrO_____IVSLw_OSN_jnYu_umzY_Rj__v_.ddz,{hyPXrsvzzSWIkVwoY\x&xe#y%)\ICkMYJuZx_KCS_o55_nZ__7LH5_W-1-11252251225127[1162651257125xC2717e61862651257125- VZGohQpFpGtnpGPTniXq_JlK_F__t____GZ__F_Og__VMgb5955baBBBAfbB25929531253._--=x7___Tr438niX_R6q_JlK_mF__t2___4A957_AxmGZ_8_BF_Og_8_VM28cgw491186264514923957031250xbBC2aBDABEEd73801BEcdD3A7AfecbD864Bdc1862645149230957031259101862645149230957031250xdbBC2aBDABEEd73801BEcdD3A7AfecbD864Bdc1862645149230957031253.0xDe15814980304091872B9EEaf48EDccB4b5Da0< \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/43a26357597ae65521480220432ff645450d01f1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/43a26357597ae65521480220432ff645450d01f1 new file mode 100644 index 00000000000..2a895564edb Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/43a26357597ae65521480220432ff645450d01f1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/46fc2fe81fed8fdc49861d8a8433560c5a02d503-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/46fc2fe81fed8fdc49861d8a8433560c5a02d503-2 new file mode 100644 index 00000000000..73c64b9ad74 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/46fc2fe81fed8fdc49861d8a8433560c5a02d503-2 @@ -0,0 +1 @@ +1136868377216160798828125 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4869e00fbc61867ccab5b83187c3a146ecb36775 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4869e00fbc61867ccab5b83187c3a146ecb36775 new file mode 100644 index 00000000000..6897c087cdd Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4869e00fbc61867ccab5b83187c3a146ecb36775 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/489d689da4860f3116a62db6b089bb0d5ba4de40 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/489d689da4860f3116a62db6b089bb0d5ba4de40 new file mode 100644 index 00000000000..3345c8763e0 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/489d689da4860f3116a62db6b089bb0d5ba4de40 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/48bc76aeb21bc6070323247927d87e591e51eaa9-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/48bc76aeb21bc6070323247927d87e591e51eaa9-1 new file mode 100644 index 00000000000..699dc96c7e5 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/48bc76aeb21bc6070323247927d87e591e51eaa9-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4959a4a9db83af17f9368cf490b8a0356d872147-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4959a4a9db83af17f9368cf490b8a0356d872147-1 new file mode 100644 index 00000000000..c139454f983 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4959a4a9db83af17f9368cf490b8a0356d872147-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/49dce428e704656d5a4f1053a443ba44f51e91f0-4 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/49dce428e704656d5a4f1053a443ba44f51e91f0-4 new file mode 100644 index 00000000000..56577e376e2 Binary files /dev/null and 
b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/49dce428e704656d5a4f1053a443ba44f51e91f0-4 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4a98d52bab6ca703e4521093afa2a400e10f4a7d-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4a98d52bab6ca703e4521093afa2a400e10f4a7d-2 new file mode 100644 index 00000000000..7d255883078 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4a98d52bab6ca703e4521093afa2a400e10f4a7d-2 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4b4859f278820e969c13c975669322e2c9df9823-5 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4b4859f278820e969c13c975669322e2c9df9823-5 new file mode 100644 index 00000000000..74187530ebf Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4b4859f278820e969c13c975669322e2c9df9823-5 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4d1adbc376464b88c52a00ed562cdacbffa4af1a-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4d1adbc376464b88c52a00ed562cdacbffa4af1a-1 new file mode 100644 index 00000000000..028ba8762d9 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4d1adbc376464b88c52a00ed562cdacbffa4af1a-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4f506d462241876610a0f92a5aa1e2b38ea8722c-7 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4f506d462241876610a0f92a5aa1e2b38ea8722c-7 new file mode 100644 index 00000000000..882d595fb3e Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/4f506d462241876610a0f92a5aa1e2b38ea8722c-7 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/51bdcd2cbd098b042fddc528083572bdbcfbe449-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/51bdcd2cbd098b042fddc528083572bdbcfbe449-1 new file mode 100644 index 00000000000..a73933e681c Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/51bdcd2cbd098b042fddc528083572bdbcfbe449-1 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/51c91bfe65eaa40fe4144306d6332ad84e4bb8ed-1 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/51c91bfe65eaa40fe4144306d6332ad84e4bb8ed-1 new file mode 100644 index 00000000000..12301f17098 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/51c91bfe65eaa40fe4144306d6332ad84e4bb8ed-1 @@ -0,0 +1 @@ +GPTrniXRqJlK_mFt____AmGZ__BF_Og__VM_gw.4995325xC2aBABd731BEd3A7Aecb8dc866414295731253.0xDe9D94B9EEaf48EDccB4b5Da0-0x0e-9101862645149230957031250xdbBC2aBDABEEd73801BEcdD3A7AfecbD864Bdc186264514923095703125 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/53bcc0e78275d3efe9e012e6d30412066bf354f9-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/53bcc0e78275d3efe9e012e6d30412066bf354f9-2 new file mode 100644 index 00000000000..e59672dfc23 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/53bcc0e78275d3efe9e012e6d30412066bf354f9-2 @@ -0,0 +1 @@ +4995325233449559994-09-519341245142309503125 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/545828581071b0d01c3fd9d15c7850c3f83ea15f-2 
b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/545828581071b0d01c3fd9d15c7850c3f83ea15f-2 new file mode 100644 index 00000000000..b8bf81e791b Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/545828581071b0d01c3fd9d15c7850c3f83ea15f-2 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/56860900c11ef30e85ce99b14e0e687745b269ae-9 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/56860900c11ef30e85ce99b14e0e687745b269ae-9 new file mode 100644 index 00000000000..794b9115f31 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/56860900c11ef30e85ce99b14e0e687745b269ae-9 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/581ed2fb7f25a6a5a8de40beae3dfd1666c511d0-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/581ed2fb7f25a6a5a8de40beae3dfd1666c511d0-2 new file mode 100644 index 00000000000..a67655c6571 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/581ed2fb7f25a6a5a8de40beae3dfd1666c511d0-2 @@ -0,0 +1 @@ +92309570312 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5b5d4262260375deb5146e3fa43f689923e856a7 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5b5d4262260375deb5146e3fa43f689923e856a7 new file mode 100644 index 00000000000..3340915b602 Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5b5d4262260375deb5146e3fa43f689923e856a7 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5c4b6679ee378f2533b099450ed21e40d54f6cd0-3 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5c4b6679ee378f2533b099450ed21e40d54f6cd0-3 new file mode 100644 index 00000000000..e2127e696cb --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5c4b6679ee378f2533b099450ed21e40d54f6cd0-3 @@ -0,0 +1 @@ +"465663981283077397778125 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e13404b6713db5f2d4972747da143057beaaf64-2 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e13404b6713db5f2d4972747da143057beaaf64-2 new file mode 100644 index 00000000000..2189bb453ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e13404b6713db5f2d4972747da143057beaaf64-2 @@ -0,0 +1 @@ +012@456789@4567x \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e6c62abd0fe49668d26e9bfced0ef0dc699f6a5-3 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e6c62abd0fe49668d26e9bfced0ef0dc699f6a5-3 new file mode 100644 index 00000000000..90d6f0afc9e Binary files /dev/null and b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e6c62abd0fe49668d26e9bfced0ef0dc699f6a5-3 differ diff --git a/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e701f4039d099c3a15c4e8d9b070eafc96de0e9 b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e701f4039d099c3a15c4e8d9b070eafc96de0e9 new file mode 100644 index 00000000000..ff8e00b9b19 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/fuzz/compress/corpus/5e701f4039d099c3a15c4e8d9b070eafc96de0e9 @@ -0,0 +1 @@ + 0003 BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if 
s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore new file mode 100644 index 00000000000..042091d9b3b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. 
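The minSize method above is the Shannon lower bound for the block: it sums count*log2(total/count) over the histogram, adds a 7-bit rounding slack, and shifts down to bytes. A minimal standalone sketch of that arithmetic (not part of the diff; the shannonMinSize name and the example histogram are ours):

```go
package main

import (
	"fmt"
	"math"
)

// shannonMinSize mirrors the minSize calculation above: the Shannon lower
// bound, in bytes, for coding `total` symbols with the given histogram,
// including the 7-bit rounding slack used by the vendored code.
func shannonMinSize(hist []uint32, total int) int {
	nbBits := float64(7)
	fTotal := float64(total)
	for _, v := range hist {
		if v > 0 {
			nbBits += math.Log2(fTotal/float64(v)) * float64(v)
		}
	}
	return int(nbBits) >> 3
}

func main() {
	// Two symbols, 128 occurrences each: 1 bit per symbol, so roughly 32 bytes.
	fmt.Println(shannonMinSize([]uint32{128, 128}, 256)) // 32
}
```

With two equally frequent symbols the bound works out to (7 + 128 + 128) bits >> 3 = 32 bytes.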
+testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 00000000000..fd1c6f67b36 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS new file mode 100644 index 00000000000..a29b1339882 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 00000000000..7e69e1a2f61 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README new file mode 100644 index 00000000000..9138fe8ee97 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/cmd/snappytool/main.cpp b/vendor/github.com/klauspost/compress/snappy/cmd/snappytool/main.cpp new file mode 100644 index 00000000000..fc31f51739a --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/cmd/snappytool/main.cpp @@ -0,0 +1,77 @@ +/* +To build the snappytool binary: +g++ main.cpp /usr/lib/libsnappy.a -o snappytool +or, if you have built the C++ snappy library from source: +g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool +after running "make" from your snappy checkout directory. +*/ + +#include +#include +#include +#include + +#include "snappy.h" + +#define N 1000000 + +char dst[N]; +char src[N]; + +int main(int argc, char** argv) { + // Parse args. + if (argc != 2) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + bool decode = strcmp(argv[1], "-d") == 0; + bool encode = strcmp(argv[1], "-e") == 0; + if (decode == encode) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + + // Read all of stdin into src[:s]. + size_t s = 0; + while (1) { + if (s == N) { + fprintf(stderr, "input too large\n"); + return 1; + } + ssize_t n = read(0, src+s, N-s); + if (n == 0) { + break; + } + if (n < 0) { + fprintf(stderr, "read error: %s\n", strerror(errno)); + // TODO: handle EAGAIN, EINTR? + return 1; + } + s += n; + } + + // Encode or decode src[:s] to dst[:d], and write to stdout. 
+ size_t d = 0; + if (encode) { + if (N < snappy::MaxCompressedLength(s)) { + fprintf(stderr, "input too large after encoding\n"); + return 1; + } + snappy::RawCompress(src, s, dst, &d); + } else { + if (!snappy::GetUncompressedLength(src, s, &d)) { + fprintf(stderr, "could not get uncompressed length\n"); + return 1; + } + if (N < d) { + fprintf(stderr, "input too large after decoding\n"); + return 1; + } + if (!snappy::RawUncompress(src, s, dst)) { + fprintf(stderr, "input was not valid Snappy-compressed data\n"); + return 1; + } + } + write(1, dst, d); + return 0; +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 00000000000..72efb0353dd --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. 
+ i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). 
+ if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go new file mode 100644 index 00000000000..fcd192b849e --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 00000000000..e6179f65e35 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. 
+ // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. 
+ +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. 
For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 00000000000..8c9f2049bc7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
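The slow-path commentary above describes expanding a short repeating pattern until the copy offset reaches 8 bytes, so that plain 8-byte moves become safe. A rough pure-Go rendering of that makeOffsetAtLeast8 loop (the function and variable names are ours; the vendored assembly remains the authoritative version):

```go
package main

import "fmt"

// expandPattern follows the makeOffsetAtLeast8 loop in the assembly above:
// while the copy offset is shorter than 8 bytes, one 8-byte block copy from
// dst[d-offset:] replicates the short pattern in place and the effective
// offset doubles, so later 8-byte copies are safe. The caller must guarantee
// at least 8 writable bytes past d.
func expandPattern(dst []byte, d, offset, length int) (int, int, int) {
	for offset < 8 {
		copy(dst[d:d+8], dst[d-offset:]) // one 8-byte move, like the MOVQ load/store
		length -= offset
		d += offset
		offset += offset // d-offset stays put; the pattern length doubles
	}
	return d, offset, length
}

func main() {
	dst := make([]byte, 32)
	copy(dst, "ab") // a two-byte pattern already decoded at dst[:2]
	d, off, n := expandPattern(dst, 2, 2, 20)
	fmt.Println(string(dst[:d]), d, off, n) // "abababab" 8 8 14
}
```

Starting from the two-byte pattern "ab" with offset 2, two iterations grow it to "abababab" and leave the offset at 8, exactly the state the fast 8-byte copy loop needs.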
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 00000000000..87496890609 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
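decode_other.go above is the reference implementation of the block format: a uvarint decoded-length header followed by literal and copy tags. As a quick illustration of that format from the caller's side, a hedged round-trip sketch against the vendored package (import path assumed to match the vendor directory) might look like:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/snappy" // import path assumed from the vendor directory
)

func main() {
	// Repetitive input, so the encoder emits copy tags as well as literals.
	src := bytes.Repeat([]byte("abxxxxxx"), 32)

	enc := snappy.Encode(nil, src) // block format: uvarint length, then tags

	n, err := snappy.DecodedLen(enc) // parses only the uvarint header
	if err != nil || n != len(src) {
		panic(fmt.Sprintf("unexpected decoded length %d: %v", n, err))
	}

	dec, err := snappy.Decode(nil, enc)
	if err != nil || !bytes.Equal(dec, src) {
		panic("round trip failed")
	}
	fmt.Printf("%d bytes in, %d bytes encoded\n", len(src), len(enc))
}
```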
+ +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. 
Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer than can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
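The worst-case analysis above yields the bound 32 + n + n/6. For a full 64 KiB block (the maxBlockSize used elsewhere in this package) that is 32 + 65536 + 10922 = 76490 bytes; a tiny check, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/snappy" // import path assumed from the vendor directory
)

func main() {
	// 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490
	fmt.Println(snappy.MaxEncodedLen(65536)) // 76490
}
```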
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go new file mode 100644 index 00000000000..150d91bc8be --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. 
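The Writer above produces the framed stream format, and its documentation stresses that a NewBufferedWriter must be Flushed or Closed, while a plain NewWriter does not buffer. A small usage sketch of that contract (import path assumed from the vendor directory):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/snappy" // import path assumed from the vendor directory
)

func main() {
	payload := bytes.Repeat([]byte("hello snappy "), 1000)

	// NewBufferedWriter emits the framed stream format; Close (or Flush) is
	// required to push out any partially filled chunk.
	var framed bytes.Buffer
	w := snappy.NewBufferedWriter(&framed)
	if _, err := w.Write(payload); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// NewReader consumes the same framing format.
	var out bytes.Buffer
	if _, err := io.Copy(&out, snappy.NewReader(&framed)); err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(out.Bytes(), payload))
}
```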
+// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 00000000000..adfd979fe27 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go new file mode 100644 index 00000000000..dbcae905e6e --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/snappy/golden_test.go b/vendor/github.com/klauspost/compress/snappy/golden_test.go new file mode 100644 index 00000000000..e4496f92e30 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/golden_test.go @@ -0,0 +1,1965 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +// extendMatchGoldenTestCases is the i and j arguments, and the returned value, +// for every extendMatch call issued when encoding the +// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the +// extendMatch implementation. +// +// It was generated manually by adding some print statements to the (pure Go) +// extendMatch implementation: +// +// func extendMatch(src []byte, i, j int) int { +// i0, j0 := i, j +// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { +// } +// println("{", i0, ",", j0, ",", j, "},") +// return j +// } +// +// and running "go test -test.run=EncodeGoldenInput -tags=noasm". 
+var extendMatchGoldenTestCases = []struct { + i, j, want int +}{ + {11, 61, 62}, + {80, 81, 82}, + {86, 87, 101}, + {85, 133, 149}, + {152, 153, 162}, + {133, 168, 193}, + {168, 207, 225}, + {81, 255, 275}, + {278, 279, 283}, + {306, 417, 417}, + {373, 428, 430}, + {389, 444, 447}, + {474, 510, 512}, + {465, 533, 533}, + {47, 547, 547}, + {307, 551, 554}, + {420, 582, 587}, + {309, 604, 604}, + {604, 625, 625}, + {538, 629, 629}, + {328, 640, 640}, + {573, 645, 645}, + {319, 657, 657}, + {30, 664, 664}, + {45, 679, 680}, + {621, 684, 684}, + {376, 700, 700}, + {33, 707, 708}, + {601, 733, 733}, + {334, 744, 745}, + {625, 758, 759}, + {382, 763, 763}, + {550, 769, 771}, + {533, 789, 789}, + {804, 813, 813}, + {342, 841, 842}, + {742, 847, 847}, + {74, 852, 852}, + {810, 864, 864}, + {758, 868, 869}, + {714, 883, 883}, + {582, 889, 891}, + {61, 934, 935}, + {894, 942, 942}, + {939, 949, 949}, + {785, 956, 957}, + {886, 978, 978}, + {792, 998, 998}, + {998, 1005, 1005}, + {572, 1032, 1032}, + {698, 1051, 1053}, + {599, 1067, 1069}, + {1056, 1079, 1079}, + {942, 1089, 1090}, + {831, 1094, 1096}, + {1088, 1100, 1103}, + {732, 1113, 1114}, + {1037, 1118, 1118}, + {872, 1128, 1130}, + {1079, 1140, 1142}, + {332, 1162, 1162}, + {207, 1168, 1186}, + {1189, 1190, 1225}, + {105, 1229, 1230}, + {79, 1256, 1257}, + {1190, 1261, 1283}, + {255, 1306, 1306}, + {1319, 1339, 1358}, + {364, 1370, 1370}, + {955, 1378, 1380}, + {122, 1403, 1403}, + {1325, 1407, 1419}, + {664, 1423, 1424}, + {941, 1461, 1463}, + {867, 1477, 1478}, + {757, 1488, 1489}, + {1140, 1499, 1499}, + {31, 1506, 1506}, + {1487, 1510, 1512}, + {1089, 1520, 1521}, + {1467, 1525, 1529}, + {1394, 1537, 1537}, + {1499, 1541, 1541}, + {367, 1558, 1558}, + {1475, 1564, 1564}, + {1525, 1568, 1571}, + {1541, 1582, 1583}, + {864, 1587, 1588}, + {704, 1597, 1597}, + {336, 1602, 1602}, + {1383, 1613, 1613}, + {1498, 1617, 1618}, + {1051, 1623, 1625}, + {401, 1643, 1645}, + {1072, 1654, 1655}, + {1067, 1667, 1669}, + {699, 1673, 1674}, + {1587, 1683, 1684}, + {920, 1696, 1696}, + {1505, 1710, 1710}, + {1550, 1723, 1723}, + {996, 1727, 1727}, + {833, 1733, 1734}, + {1638, 1739, 1740}, + {1654, 1744, 1744}, + {753, 1761, 1761}, + {1548, 1773, 1773}, + {1568, 1777, 1780}, + {1683, 1793, 1794}, + {948, 1801, 1801}, + {1666, 1805, 1808}, + {1502, 1814, 1814}, + {1696, 1822, 1822}, + {502, 1836, 1837}, + {917, 1843, 1843}, + {1733, 1854, 1855}, + {970, 1859, 1859}, + {310, 1863, 1863}, + {657, 1872, 1872}, + {1005, 1876, 1876}, + {1662, 1880, 1880}, + {904, 1892, 1892}, + {1427, 1910, 1910}, + {1772, 1929, 1930}, + {1822, 1937, 1940}, + {1858, 1949, 1950}, + {1602, 1956, 1956}, + {1150, 1962, 1962}, + {1504, 1966, 1967}, + {51, 1971, 1971}, + {1605, 1979, 1979}, + {1458, 1983, 1988}, + {1536, 2001, 2006}, + {1373, 2014, 2018}, + {1494, 2025, 2025}, + {1667, 2029, 2031}, + {1592, 2035, 2035}, + {330, 2045, 2045}, + {1376, 2053, 2053}, + {1991, 2058, 2059}, + {1635, 2065, 2065}, + {1992, 2073, 2074}, + {2014, 2080, 2081}, + {1546, 2085, 2087}, + {59, 2099, 2099}, + {1996, 2106, 2106}, + {1836, 2110, 2110}, + {2068, 2114, 2114}, + {1338, 2122, 2122}, + {1562, 2128, 2130}, + {1934, 2134, 2134}, + {2114, 2141, 2142}, + {977, 2149, 2150}, + {956, 2154, 2155}, + {1407, 2162, 2162}, + {1773, 2166, 2166}, + {883, 2171, 2171}, + {623, 2175, 2178}, + {1520, 2191, 2192}, + {1162, 2200, 2200}, + {912, 2204, 2204}, + {733, 2208, 2208}, + {1777, 2212, 2215}, + {1532, 2219, 2219}, + {718, 2223, 2225}, + {2069, 2229, 2229}, + {2207, 2245, 2246}, + {1139, 2264, 2264}, + 
{677, 2274, 2274}, + {2099, 2279, 2279}, + {1863, 2283, 2283}, + {1966, 2305, 2306}, + {2279, 2313, 2313}, + {1628, 2319, 2319}, + {755, 2329, 2329}, + {1461, 2334, 2334}, + {2117, 2340, 2340}, + {2313, 2349, 2349}, + {1859, 2353, 2353}, + {1048, 2362, 2362}, + {895, 2366, 2366}, + {2278, 2373, 2373}, + {1884, 2377, 2377}, + {1402, 2387, 2392}, + {700, 2398, 2398}, + {1971, 2402, 2402}, + {2009, 2419, 2419}, + {1441, 2426, 2428}, + {2208, 2432, 2432}, + {2038, 2436, 2436}, + {932, 2443, 2443}, + {1759, 2447, 2448}, + {744, 2452, 2452}, + {1875, 2458, 2458}, + {2405, 2468, 2468}, + {1596, 2472, 2473}, + {1953, 2480, 2482}, + {736, 2487, 2487}, + {1913, 2493, 2493}, + {774, 2497, 2497}, + {1484, 2506, 2508}, + {2432, 2512, 2512}, + {752, 2519, 2519}, + {2497, 2523, 2523}, + {2409, 2528, 2529}, + {2122, 2533, 2533}, + {2396, 2537, 2538}, + {2410, 2547, 2548}, + {1093, 2555, 2560}, + {551, 2564, 2565}, + {2268, 2569, 2569}, + {1362, 2580, 2580}, + {1916, 2584, 2585}, + {994, 2589, 2590}, + {1979, 2596, 2596}, + {1041, 2602, 2602}, + {2104, 2614, 2616}, + {2609, 2621, 2628}, + {2329, 2638, 2638}, + {2211, 2657, 2658}, + {2638, 2662, 2667}, + {2578, 2676, 2679}, + {2153, 2685, 2686}, + {2608, 2696, 2697}, + {598, 2712, 2712}, + {2620, 2719, 2720}, + {1888, 2724, 2728}, + {2709, 2732, 2732}, + {1365, 2739, 2739}, + {784, 2747, 2748}, + {424, 2753, 2753}, + {2204, 2759, 2759}, + {812, 2768, 2769}, + {2455, 2773, 2773}, + {1722, 2781, 2781}, + {1917, 2792, 2792}, + {2705, 2799, 2799}, + {2685, 2806, 2807}, + {2742, 2811, 2811}, + {1370, 2818, 2818}, + {2641, 2830, 2830}, + {2512, 2837, 2837}, + {2457, 2841, 2841}, + {2756, 2845, 2845}, + {2719, 2855, 2855}, + {1423, 2859, 2859}, + {2849, 2863, 2865}, + {1474, 2871, 2871}, + {1161, 2875, 2876}, + {2282, 2880, 2881}, + {2746, 2888, 2888}, + {1783, 2893, 2893}, + {2401, 2899, 2900}, + {2632, 2920, 2923}, + {2422, 2928, 2930}, + {2715, 2939, 2939}, + {2162, 2943, 2943}, + {2859, 2947, 2947}, + {1910, 2951, 2951}, + {1431, 2955, 2956}, + {1439, 2964, 2964}, + {2501, 2968, 2969}, + {2029, 2973, 2976}, + {689, 2983, 2984}, + {1658, 2988, 2988}, + {1031, 2996, 2996}, + {2149, 3001, 3002}, + {25, 3009, 3013}, + {2964, 3023, 3023}, + {953, 3027, 3028}, + {2359, 3036, 3036}, + {3023, 3049, 3049}, + {2880, 3055, 3056}, + {2973, 3076, 3077}, + {2874, 3090, 3090}, + {2871, 3094, 3094}, + {2532, 3100, 3100}, + {2938, 3107, 3108}, + {350, 3115, 3115}, + {2196, 3119, 3121}, + {1133, 3127, 3129}, + {1797, 3134, 3150}, + {3032, 3158, 3158}, + {3016, 3172, 3172}, + {2533, 3179, 3179}, + {3055, 3187, 3188}, + {1384, 3192, 3193}, + {2799, 3199, 3199}, + {2126, 3203, 3207}, + {2334, 3215, 3215}, + {2105, 3220, 3221}, + {3199, 3229, 3229}, + {2891, 3233, 3233}, + {855, 3240, 3240}, + {1852, 3253, 3256}, + {2140, 3263, 3263}, + {1682, 3268, 3270}, + {3243, 3274, 3274}, + {924, 3279, 3279}, + {2212, 3283, 3283}, + {2596, 3287, 3287}, + {2999, 3291, 3291}, + {2353, 3295, 3295}, + {2480, 3302, 3304}, + {1959, 3308, 3311}, + {3000, 3318, 3318}, + {845, 3330, 3330}, + {2283, 3334, 3334}, + {2519, 3342, 3342}, + {3325, 3346, 3348}, + {2397, 3353, 3354}, + {2763, 3358, 3358}, + {3198, 3363, 3364}, + {3211, 3368, 3372}, + {2950, 3376, 3377}, + {3245, 3388, 3391}, + {2264, 3398, 3398}, + {795, 3403, 3403}, + {3287, 3407, 3407}, + {3358, 3411, 3411}, + {3317, 3415, 3415}, + {3232, 3431, 3431}, + {2128, 3435, 3437}, + {3236, 3441, 3441}, + {3398, 3445, 3446}, + {2814, 3450, 3450}, + {3394, 3466, 3466}, + {2425, 3470, 3470}, + {3330, 3476, 3476}, + {1612, 3480, 3480}, + {1004, 3485, 
3486}, + {2732, 3490, 3490}, + {1117, 3494, 3495}, + {629, 3501, 3501}, + {3087, 3514, 3514}, + {684, 3518, 3518}, + {3489, 3522, 3524}, + {1760, 3529, 3529}, + {617, 3537, 3537}, + {3431, 3541, 3541}, + {997, 3547, 3547}, + {882, 3552, 3553}, + {2419, 3558, 3558}, + {610, 3562, 3563}, + {1903, 3567, 3569}, + {3005, 3575, 3575}, + {3076, 3585, 3586}, + {3541, 3590, 3590}, + {3490, 3594, 3594}, + {1899, 3599, 3599}, + {3545, 3606, 3606}, + {3290, 3614, 3615}, + {2056, 3619, 3620}, + {3556, 3625, 3625}, + {3294, 3632, 3633}, + {637, 3643, 3644}, + {3609, 3648, 3650}, + {3175, 3658, 3658}, + {3498, 3665, 3665}, + {1597, 3669, 3669}, + {1983, 3673, 3673}, + {3215, 3682, 3682}, + {3544, 3689, 3689}, + {3694, 3698, 3698}, + {3228, 3715, 3716}, + {2594, 3720, 3722}, + {3573, 3726, 3726}, + {2479, 3732, 3735}, + {3191, 3741, 3742}, + {1113, 3746, 3747}, + {2844, 3751, 3751}, + {3445, 3756, 3757}, + {3755, 3766, 3766}, + {3421, 3775, 3780}, + {3593, 3784, 3786}, + {3263, 3796, 3796}, + {3469, 3806, 3806}, + {2602, 3815, 3815}, + {723, 3819, 3821}, + {1608, 3826, 3826}, + {3334, 3830, 3830}, + {2198, 3835, 3835}, + {2635, 3840, 3840}, + {3702, 3852, 3853}, + {3406, 3858, 3859}, + {3681, 3867, 3870}, + {3407, 3880, 3880}, + {340, 3889, 3889}, + {3772, 3893, 3893}, + {593, 3897, 3897}, + {2563, 3914, 3916}, + {2981, 3929, 3929}, + {1835, 3933, 3934}, + {3906, 3951, 3951}, + {1459, 3958, 3958}, + {3889, 3974, 3974}, + {2188, 3982, 3982}, + {3220, 3986, 3987}, + {3585, 3991, 3993}, + {3712, 3997, 4001}, + {2805, 4007, 4007}, + {1879, 4012, 4013}, + {3618, 4018, 4018}, + {1145, 4031, 4032}, + {3901, 4037, 4037}, + {2772, 4046, 4047}, + {2802, 4053, 4054}, + {3299, 4058, 4058}, + {3725, 4066, 4066}, + {2271, 4070, 4070}, + {385, 4075, 4076}, + {3624, 4089, 4090}, + {3745, 4096, 4098}, + {1563, 4102, 4102}, + {4045, 4106, 4111}, + {3696, 4115, 4119}, + {3376, 4125, 4126}, + {1880, 4130, 4130}, + {2048, 4140, 4141}, + {2724, 4149, 4149}, + {1767, 4156, 4156}, + {2601, 4164, 4164}, + {2757, 4168, 4168}, + {3974, 4172, 4172}, + {3914, 4178, 4178}, + {516, 4185, 4185}, + {1032, 4189, 4190}, + {3462, 4197, 4198}, + {3805, 4202, 4203}, + {3910, 4207, 4212}, + {3075, 4221, 4221}, + {3756, 4225, 4226}, + {1872, 4236, 4237}, + {3844, 4241, 4241}, + {3991, 4245, 4249}, + {2203, 4258, 4258}, + {3903, 4267, 4268}, + {705, 4272, 4272}, + {1896, 4276, 4276}, + {1955, 4285, 4288}, + {3746, 4302, 4303}, + {2672, 4311, 4311}, + {3969, 4317, 4317}, + {3883, 4322, 4322}, + {1920, 4339, 4340}, + {3527, 4344, 4346}, + {1160, 4358, 4358}, + {3648, 4364, 4366}, + {2711, 4387, 4387}, + {3619, 4391, 4392}, + {1944, 4396, 4396}, + {4369, 4400, 4400}, + {2736, 4404, 4407}, + {2546, 4411, 4412}, + {4390, 4422, 4422}, + {3610, 4426, 4427}, + {4058, 4431, 4431}, + {4374, 4435, 4435}, + {3463, 4445, 4446}, + {1813, 4452, 4452}, + {3669, 4456, 4456}, + {3830, 4460, 4460}, + {421, 4464, 4465}, + {1719, 4471, 4471}, + {3880, 4475, 4475}, + {1834, 4485, 4487}, + {3590, 4491, 4491}, + {442, 4496, 4497}, + {4435, 4501, 4501}, + {3814, 4509, 4509}, + {987, 4513, 4513}, + {4494, 4518, 4521}, + {3218, 4526, 4529}, + {4221, 4537, 4537}, + {2778, 4543, 4545}, + {4422, 4552, 4552}, + {4031, 4558, 4559}, + {4178, 4563, 4563}, + {3726, 4567, 4574}, + {4027, 4578, 4578}, + {4339, 4585, 4587}, + {3796, 4592, 4595}, + {543, 4600, 4613}, + {2855, 4620, 4621}, + {2795, 4627, 4627}, + {3440, 4631, 4632}, + {4279, 4636, 4639}, + {4245, 4643, 4645}, + {4516, 4649, 4650}, + {3133, 4654, 4654}, + {4042, 4658, 4659}, + {3422, 4663, 4663}, + {4046, 4667, 4668}, 
+ {4267, 4672, 4672}, + {4004, 4676, 4677}, + {2490, 4682, 4682}, + {2451, 4697, 4697}, + {3027, 4705, 4705}, + {4028, 4717, 4717}, + {4460, 4721, 4721}, + {2471, 4725, 4727}, + {3090, 4735, 4735}, + {3192, 4739, 4740}, + {3835, 4760, 4760}, + {4540, 4764, 4764}, + {4007, 4772, 4774}, + {619, 4784, 4784}, + {3561, 4789, 4791}, + {3367, 4805, 4805}, + {4490, 4810, 4811}, + {2402, 4815, 4815}, + {3352, 4819, 4822}, + {2773, 4828, 4828}, + {4552, 4832, 4832}, + {2522, 4840, 4841}, + {316, 4847, 4852}, + {4715, 4858, 4858}, + {2959, 4862, 4862}, + {4858, 4868, 4869}, + {2134, 4873, 4873}, + {578, 4878, 4878}, + {4189, 4889, 4890}, + {2229, 4894, 4894}, + {4501, 4898, 4898}, + {2297, 4903, 4903}, + {2933, 4909, 4909}, + {3008, 4913, 4913}, + {3153, 4917, 4917}, + {4819, 4921, 4921}, + {4921, 4932, 4933}, + {4920, 4944, 4945}, + {4814, 4954, 4955}, + {576, 4966, 4966}, + {1854, 4970, 4971}, + {1374, 4975, 4976}, + {3307, 4980, 4980}, + {974, 4984, 4988}, + {4721, 4992, 4992}, + {4898, 4996, 4996}, + {4475, 5006, 5006}, + {3819, 5012, 5012}, + {1948, 5019, 5021}, + {4954, 5027, 5029}, + {3740, 5038, 5040}, + {4763, 5044, 5045}, + {1936, 5051, 5051}, + {4844, 5055, 5060}, + {4215, 5069, 5072}, + {1146, 5076, 5076}, + {3845, 5082, 5082}, + {4865, 5090, 5090}, + {4624, 5094, 5094}, + {4815, 5098, 5098}, + {5006, 5105, 5105}, + {4980, 5109, 5109}, + {4795, 5113, 5115}, + {5043, 5119, 5121}, + {4782, 5129, 5129}, + {3826, 5139, 5139}, + {3876, 5156, 5156}, + {3111, 5167, 5171}, + {1470, 5177, 5177}, + {4431, 5181, 5181}, + {546, 5189, 5189}, + {4225, 5193, 5193}, + {1672, 5199, 5201}, + {4207, 5205, 5209}, + {4220, 5216, 5217}, + {4658, 5224, 5225}, + {3295, 5235, 5235}, + {2436, 5239, 5239}, + {2349, 5246, 5246}, + {2175, 5250, 5250}, + {5180, 5257, 5258}, + {3161, 5263, 5263}, + {5105, 5272, 5272}, + {3552, 5282, 5282}, + {4944, 5299, 5300}, + {4130, 5312, 5313}, + {902, 5323, 5323}, + {913, 5327, 5327}, + {2987, 5333, 5334}, + {5150, 5344, 5344}, + {5249, 5348, 5348}, + {1965, 5358, 5359}, + {5330, 5364, 5364}, + {2012, 5373, 5377}, + {712, 5384, 5386}, + {5235, 5390, 5390}, + {5044, 5398, 5399}, + {564, 5406, 5406}, + {39, 5410, 5410}, + {4642, 5422, 5425}, + {4421, 5437, 5438}, + {2347, 5449, 5449}, + {5333, 5453, 5454}, + {4136, 5458, 5459}, + {3793, 5468, 5468}, + {2243, 5480, 5480}, + {4889, 5492, 5493}, + {4295, 5504, 5504}, + {2785, 5511, 5511}, + {2377, 5518, 5518}, + {3662, 5525, 5525}, + {5097, 5529, 5530}, + {4781, 5537, 5538}, + {4697, 5547, 5548}, + {436, 5552, 5553}, + {5542, 5558, 5558}, + {3692, 5562, 5562}, + {2696, 5568, 5569}, + {4620, 5578, 5578}, + {2898, 5590, 5590}, + {5557, 5596, 5618}, + {2797, 5623, 5625}, + {2792, 5629, 5629}, + {5243, 5633, 5633}, + {5348, 5637, 5637}, + {5547, 5643, 5643}, + {4296, 5654, 5655}, + {5568, 5662, 5662}, + {3001, 5670, 5671}, + {3794, 5679, 5679}, + {4006, 5685, 5686}, + {4969, 5690, 5692}, + {687, 5704, 5704}, + {4563, 5708, 5708}, + {1723, 5738, 5738}, + {649, 5742, 5742}, + {5163, 5748, 5755}, + {3907, 5759, 5759}, + {3074, 5764, 5764}, + {5326, 5771, 5771}, + {2951, 5776, 5776}, + {5181, 5780, 5780}, + {2614, 5785, 5788}, + {4709, 5794, 5794}, + {2784, 5799, 5799}, + {5518, 5803, 5803}, + {4155, 5812, 5815}, + {921, 5819, 5819}, + {5224, 5823, 5824}, + {2853, 5830, 5836}, + {5776, 5840, 5840}, + {2955, 5844, 5845}, + {5745, 5853, 5853}, + {3291, 5857, 5857}, + {2988, 5861, 5861}, + {2647, 5865, 5865}, + {5398, 5869, 5870}, + {1085, 5874, 5875}, + {4906, 5881, 5881}, + {802, 5886, 5886}, + {5119, 5890, 5893}, + {5802, 5899, 5900}, + 
{3415, 5904, 5904}, + {5629, 5908, 5908}, + {3714, 5912, 5914}, + {5558, 5921, 5921}, + {2710, 5927, 5928}, + {1094, 5932, 5934}, + {2653, 5940, 5941}, + {4735, 5954, 5954}, + {5861, 5958, 5958}, + {1040, 5971, 5971}, + {5514, 5977, 5977}, + {5048, 5981, 5982}, + {5953, 5992, 5993}, + {3751, 5997, 5997}, + {4991, 6001, 6002}, + {5885, 6006, 6007}, + {5529, 6011, 6012}, + {4974, 6019, 6020}, + {5857, 6024, 6024}, + {3483, 6032, 6032}, + {3594, 6036, 6036}, + {1997, 6040, 6040}, + {5997, 6044, 6047}, + {5197, 6051, 6051}, + {1764, 6055, 6055}, + {6050, 6059, 6059}, + {5239, 6063, 6063}, + {5049, 6067, 6067}, + {5957, 6073, 6074}, + {1022, 6078, 6078}, + {3414, 6083, 6084}, + {3809, 6090, 6090}, + {4562, 6095, 6096}, + {5878, 6104, 6104}, + {594, 6108, 6109}, + {3353, 6115, 6116}, + {4992, 6120, 6121}, + {2424, 6125, 6125}, + {4484, 6130, 6130}, + {3900, 6134, 6135}, + {5793, 6139, 6141}, + {3562, 6145, 6145}, + {1438, 6152, 6153}, + {6058, 6157, 6158}, + {4411, 6162, 6163}, + {4590, 6167, 6171}, + {4748, 6175, 6175}, + {5517, 6183, 6184}, + {6095, 6191, 6192}, + {1471, 6203, 6203}, + {2643, 6209, 6210}, + {450, 6220, 6220}, + {5266, 6226, 6226}, + {2576, 6233, 6233}, + {2607, 6239, 6240}, + {5164, 6244, 6251}, + {6054, 6255, 6255}, + {1789, 6260, 6261}, + {5250, 6265, 6265}, + {6062, 6273, 6278}, + {5990, 6282, 6282}, + {3283, 6286, 6286}, + {5436, 6290, 6290}, + {6059, 6294, 6294}, + {5668, 6298, 6300}, + {3072, 6324, 6329}, + {3132, 6338, 6339}, + {3246, 6343, 6344}, + {28, 6348, 6349}, + {1503, 6353, 6355}, + {6067, 6359, 6359}, + {3384, 6364, 6364}, + {545, 6375, 6376}, + {5803, 6380, 6380}, + {5522, 6384, 6385}, + {5908, 6389, 6389}, + {2796, 6393, 6396}, + {4831, 6403, 6404}, + {6388, 6412, 6412}, + {6005, 6417, 6420}, + {4450, 6430, 6430}, + {4050, 6435, 6435}, + {5372, 6441, 6441}, + {4378, 6447, 6447}, + {6199, 6452, 6452}, + {3026, 6456, 6456}, + {2642, 6460, 6462}, + {6392, 6470, 6470}, + {6459, 6474, 6474}, + {2829, 6487, 6488}, + {2942, 6499, 6504}, + {5069, 6508, 6511}, + {5341, 6515, 6516}, + {5853, 6521, 6525}, + {6104, 6531, 6531}, + {5759, 6535, 6538}, + {4672, 6542, 6543}, + {2443, 6550, 6550}, + {5109, 6554, 6554}, + {6494, 6558, 6560}, + {6006, 6570, 6572}, + {6424, 6576, 6580}, + {4693, 6591, 6592}, + {6439, 6596, 6597}, + {3179, 6601, 6601}, + {5299, 6606, 6607}, + {4148, 6612, 6613}, + {3774, 6617, 6617}, + {3537, 6623, 6624}, + {4975, 6628, 6629}, + {3848, 6636, 6636}, + {856, 6640, 6640}, + {5724, 6645, 6645}, + {6632, 6651, 6651}, + {4630, 6656, 6658}, + {1440, 6662, 6662}, + {4281, 6666, 6667}, + {4302, 6671, 6672}, + {2589, 6676, 6677}, + {5647, 6681, 6687}, + {6082, 6691, 6693}, + {6144, 6698, 6698}, + {6103, 6709, 6710}, + {3710, 6714, 6714}, + {4253, 6718, 6721}, + {2467, 6730, 6730}, + {4778, 6734, 6734}, + {6528, 6738, 6738}, + {4358, 6747, 6747}, + {5889, 6753, 6753}, + {5193, 6757, 6757}, + {5797, 6761, 6761}, + {3858, 6765, 6766}, + {5951, 6776, 6776}, + {6487, 6781, 6782}, + {3282, 6786, 6787}, + {4667, 6797, 6799}, + {1927, 6803, 6806}, + {6583, 6810, 6810}, + {4937, 6814, 6814}, + {6099, 6824, 6824}, + {4415, 6835, 6836}, + {6332, 6840, 6841}, + {5160, 6850, 6850}, + {4764, 6854, 6854}, + {6814, 6858, 6859}, + {3018, 6864, 6864}, + {6293, 6868, 6869}, + {6359, 6877, 6877}, + {3047, 6884, 6886}, + {5262, 6890, 6891}, + {5471, 6900, 6900}, + {3268, 6910, 6912}, + {1047, 6916, 6916}, + {5904, 6923, 6923}, + {5798, 6933, 6938}, + {4149, 6942, 6942}, + {1821, 6946, 6946}, + {3599, 6952, 6952}, + {6470, 6957, 6957}, + {5562, 6961, 6961}, + {6268, 6965, 
6967}, + {6389, 6971, 6971}, + {6596, 6975, 6976}, + {6553, 6980, 6981}, + {6576, 6985, 6989}, + {1375, 6993, 6993}, + {652, 6998, 6998}, + {4876, 7002, 7003}, + {5768, 7011, 7013}, + {3973, 7017, 7017}, + {6802, 7025, 7025}, + {6955, 7034, 7036}, + {6974, 7040, 7040}, + {5944, 7044, 7044}, + {6992, 7048, 7054}, + {6872, 7059, 7059}, + {2943, 7063, 7063}, + {6923, 7067, 7067}, + {5094, 7071, 7071}, + {4873, 7075, 7075}, + {5819, 7079, 7079}, + {5945, 7085, 7085}, + {1540, 7090, 7091}, + {2090, 7095, 7095}, + {5024, 7104, 7105}, + {6900, 7109, 7109}, + {6024, 7113, 7114}, + {6000, 7118, 7120}, + {2187, 7124, 7125}, + {6760, 7129, 7130}, + {5898, 7134, 7136}, + {7032, 7144, 7144}, + {4271, 7148, 7148}, + {3706, 7152, 7152}, + {6970, 7156, 7157}, + {7088, 7161, 7163}, + {2718, 7168, 7169}, + {5674, 7175, 7175}, + {4631, 7182, 7182}, + {7070, 7188, 7189}, + {6220, 7196, 7196}, + {3458, 7201, 7202}, + {2041, 7211, 7212}, + {1454, 7216, 7216}, + {5199, 7225, 7227}, + {3529, 7234, 7234}, + {6890, 7238, 7238}, + {3815, 7242, 7243}, + {5490, 7250, 7253}, + {6554, 7257, 7263}, + {5890, 7267, 7269}, + {6877, 7273, 7273}, + {4877, 7277, 7277}, + {2502, 7285, 7285}, + {1483, 7289, 7295}, + {7210, 7304, 7308}, + {6845, 7313, 7316}, + {7219, 7320, 7320}, + {7001, 7325, 7329}, + {6853, 7333, 7334}, + {6120, 7338, 7338}, + {6606, 7342, 7343}, + {7020, 7348, 7350}, + {3509, 7354, 7354}, + {7133, 7359, 7363}, + {3434, 7371, 7374}, + {2787, 7384, 7384}, + {7044, 7388, 7388}, + {6960, 7394, 7395}, + {6676, 7399, 7400}, + {7161, 7404, 7404}, + {7285, 7417, 7418}, + {4558, 7425, 7426}, + {4828, 7430, 7430}, + {6063, 7436, 7436}, + {3597, 7442, 7442}, + {914, 7446, 7446}, + {7320, 7452, 7454}, + {7267, 7458, 7460}, + {5076, 7464, 7464}, + {7430, 7468, 7469}, + {6273, 7473, 7474}, + {7440, 7478, 7487}, + {7348, 7491, 7494}, + {1021, 7510, 7510}, + {7473, 7515, 7515}, + {2823, 7519, 7519}, + {6264, 7527, 7527}, + {7302, 7531, 7531}, + {7089, 7535, 7535}, + {7342, 7540, 7541}, + {3688, 7547, 7551}, + {3054, 7558, 7560}, + {4177, 7566, 7567}, + {6691, 7574, 7575}, + {7156, 7585, 7586}, + {7147, 7590, 7592}, + {7407, 7598, 7598}, + {7403, 7602, 7603}, + {6868, 7607, 7607}, + {6636, 7611, 7611}, + {4805, 7617, 7617}, + {5779, 7623, 7623}, + {7063, 7627, 7627}, + {5079, 7632, 7632}, + {7377, 7637, 7637}, + {7337, 7641, 7642}, + {6738, 7655, 7655}, + {7338, 7659, 7659}, + {6541, 7669, 7671}, + {595, 7675, 7675}, + {7658, 7679, 7680}, + {7647, 7685, 7686}, + {2477, 7690, 7690}, + {5823, 7694, 7694}, + {4156, 7699, 7699}, + {5931, 7703, 7706}, + {6854, 7712, 7712}, + {4931, 7718, 7718}, + {6979, 7722, 7722}, + {5085, 7727, 7727}, + {6965, 7732, 7732}, + {7201, 7736, 7737}, + {3639, 7741, 7743}, + {7534, 7749, 7749}, + {4292, 7753, 7753}, + {3427, 7759, 7763}, + {7273, 7767, 7767}, + {940, 7778, 7778}, + {4838, 7782, 7785}, + {4216, 7790, 7792}, + {922, 7800, 7801}, + {7256, 7810, 7811}, + {7789, 7815, 7819}, + {7225, 7823, 7825}, + {7531, 7829, 7829}, + {6997, 7833, 7833}, + {7757, 7837, 7838}, + {4129, 7842, 7842}, + {7333, 7848, 7849}, + {6776, 7855, 7855}, + {7527, 7859, 7859}, + {4370, 7863, 7863}, + {4512, 7868, 7868}, + {5679, 7880, 7880}, + {3162, 7884, 7885}, + {3933, 7892, 7894}, + {7804, 7899, 7902}, + {6363, 7906, 7907}, + {7848, 7911, 7912}, + {5584, 7917, 7921}, + {874, 7926, 7926}, + {3342, 7930, 7930}, + {4507, 7935, 7937}, + {3672, 7943, 7944}, + {7911, 7948, 7949}, + {6402, 7956, 7956}, + {7940, 7960, 7960}, + {7113, 7964, 7964}, + {1073, 7968, 7968}, + {7740, 7974, 7974}, + {7601, 7978, 7982}, + {6797, 
7987, 7988}, + {3528, 7994, 7995}, + {5483, 7999, 7999}, + {5717, 8011, 8011}, + {5480, 8017, 8017}, + {7770, 8023, 8030}, + {2452, 8034, 8034}, + {5282, 8047, 8047}, + {7967, 8051, 8051}, + {1128, 8058, 8066}, + {6348, 8070, 8070}, + {8055, 8077, 8077}, + {7925, 8081, 8086}, + {6810, 8090, 8090}, + {5051, 8101, 8101}, + {4696, 8109, 8110}, + {5129, 8119, 8119}, + {4449, 8123, 8123}, + {7222, 8127, 8127}, + {4649, 8131, 8134}, + {7994, 8138, 8138}, + {5954, 8148, 8148}, + {475, 8152, 8153}, + {7906, 8157, 8157}, + {7458, 8164, 8166}, + {7632, 8171, 8173}, + {3874, 8177, 8183}, + {4391, 8187, 8187}, + {561, 8191, 8191}, + {2417, 8195, 8195}, + {2357, 8204, 8204}, + {2269, 8216, 8218}, + {3968, 8222, 8222}, + {2200, 8226, 8227}, + {3453, 8247, 8247}, + {2439, 8251, 8252}, + {7175, 8257, 8257}, + {976, 8262, 8264}, + {4953, 8273, 8273}, + {4219, 8278, 8278}, + {6, 8285, 8291}, + {5703, 8295, 8296}, + {5272, 8300, 8300}, + {8037, 8304, 8304}, + {8186, 8314, 8314}, + {8304, 8318, 8318}, + {8051, 8326, 8326}, + {8318, 8330, 8330}, + {2671, 8334, 8335}, + {2662, 8339, 8339}, + {8081, 8349, 8350}, + {3328, 8356, 8356}, + {2879, 8360, 8362}, + {8050, 8370, 8371}, + {8330, 8375, 8376}, + {8375, 8386, 8386}, + {4961, 8390, 8390}, + {1017, 8403, 8405}, + {3533, 8416, 8416}, + {4555, 8422, 8422}, + {6445, 8426, 8426}, + {8169, 8432, 8432}, + {990, 8436, 8436}, + {4102, 8440, 8440}, + {7398, 8444, 8446}, + {3480, 8450, 8450}, + {6324, 8462, 8462}, + {7948, 8466, 8467}, + {5950, 8471, 8471}, + {5189, 8476, 8476}, + {4026, 8490, 8490}, + {8374, 8494, 8495}, + {4682, 8501, 8501}, + {7387, 8506, 8506}, + {8164, 8510, 8515}, + {4079, 8524, 8524}, + {8360, 8529, 8531}, + {7446, 8540, 8543}, + {7971, 8547, 8548}, + {4311, 8552, 8552}, + {5204, 8556, 8557}, + {7968, 8562, 8562}, + {7847, 8571, 8573}, + {8547, 8577, 8577}, + {5320, 8581, 8581}, + {8556, 8585, 8586}, + {8504, 8590, 8590}, + {7669, 8602, 8604}, + {5874, 8608, 8609}, + {5828, 8613, 8613}, + {7998, 8617, 8617}, + {8519, 8625, 8625}, + {7250, 8637, 8637}, + {426, 8641, 8641}, + {8436, 8645, 8645}, + {5986, 8649, 8656}, + {8157, 8660, 8660}, + {7182, 8665, 8665}, + {8421, 8675, 8675}, + {8509, 8681, 8681}, + {5137, 8688, 8689}, + {8625, 8694, 8695}, + {5228, 8701, 8702}, + {6661, 8714, 8714}, + {1010, 8719, 8719}, + {6648, 8723, 8723}, + {3500, 8728, 8728}, + {2442, 8735, 8735}, + {8494, 8740, 8741}, + {8171, 8753, 8755}, + {7242, 8763, 8764}, + {4739, 8768, 8769}, + {7079, 8773, 8773}, + {8386, 8777, 8777}, + {8624, 8781, 8787}, + {661, 8791, 8794}, + {8631, 8801, 8801}, + {7753, 8805, 8805}, + {4783, 8809, 8810}, + {1673, 8814, 8815}, + {6623, 8819, 8819}, + {4404, 8823, 8823}, + {8089, 8827, 8828}, + {8773, 8832, 8832}, + {5394, 8836, 8836}, + {6231, 8841, 8843}, + {1015, 8852, 8853}, + {6873, 8857, 8857}, + {6289, 8865, 8865}, + {8577, 8869, 8869}, + {8114, 8873, 8875}, + {8534, 8883, 8883}, + {3007, 8887, 8888}, + {8827, 8892, 8893}, + {4788, 8897, 8900}, + {5698, 8906, 8907}, + {7690, 8911, 8911}, + {6643, 8919, 8919}, + {7206, 8923, 8924}, + {7866, 8929, 8931}, + {8880, 8942, 8942}, + {8630, 8951, 8952}, + {6027, 8958, 8958}, + {7749, 8966, 8967}, + {4932, 8972, 8973}, + {8892, 8980, 8981}, + {634, 9003, 9003}, + {8109, 9007, 9008}, + {8777, 9012, 9012}, + {3981, 9016, 9017}, + {5723, 9025, 9025}, + {7662, 9034, 9038}, + {8955, 9042, 9042}, + {8070, 9060, 9062}, + {8910, 9066, 9066}, + {5363, 9070, 9071}, + {7699, 9075, 9076}, + {8991, 9081, 9081}, + {6850, 9085, 9085}, + {5811, 9092, 9094}, + {9079, 9098, 9102}, + {6456, 9106, 9106}, + {2259, 
9111, 9111}, + {4752, 9116, 9116}, + {9060, 9120, 9123}, + {8090, 9127, 9127}, + {5305, 9131, 9132}, + {8623, 9137, 9137}, + {7417, 9141, 9141}, + {6564, 9148, 9149}, + {9126, 9157, 9158}, + {4285, 9169, 9170}, + {8698, 9174, 9174}, + {8869, 9178, 9178}, + {2572, 9182, 9183}, + {6482, 9188, 9190}, + {9181, 9201, 9201}, + {2968, 9208, 9209}, + {2506, 9213, 9215}, + {9127, 9219, 9219}, + {7910, 9225, 9227}, + {5422, 9235, 9239}, + {8813, 9244, 9246}, + {9178, 9250, 9250}, + {8748, 9255, 9255}, + {7354, 9265, 9265}, + {7767, 9269, 9269}, + {7710, 9281, 9283}, + {8826, 9288, 9290}, + {861, 9295, 9295}, + {4482, 9301, 9301}, + {9264, 9305, 9306}, + {8805, 9310, 9310}, + {4995, 9314, 9314}, + {6730, 9318, 9318}, + {7457, 9328, 9328}, + {2547, 9335, 9336}, + {6298, 9340, 9343}, + {9305, 9353, 9354}, + {9269, 9358, 9358}, + {6338, 9370, 9370}, + {7289, 9376, 9379}, + {5780, 9383, 9383}, + {7607, 9387, 9387}, + {2065, 9392, 9392}, + {7238, 9396, 9396}, + {8856, 9400, 9400}, + {8069, 9412, 9413}, + {611, 9420, 9420}, + {7071, 9424, 9424}, + {3089, 9430, 9431}, + {7117, 9435, 9438}, + {1976, 9445, 9445}, + {6640, 9449, 9449}, + {5488, 9453, 9453}, + {8739, 9457, 9459}, + {5958, 9466, 9466}, + {7985, 9470, 9470}, + {8735, 9475, 9475}, + {5009, 9479, 9479}, + {8073, 9483, 9484}, + {2328, 9490, 9491}, + {9250, 9495, 9495}, + {4043, 9502, 9502}, + {7712, 9506, 9506}, + {9012, 9510, 9510}, + {9028, 9514, 9515}, + {2190, 9521, 9524}, + {9029, 9528, 9528}, + {9519, 9532, 9532}, + {9495, 9536, 9536}, + {8527, 9540, 9540}, + {2137, 9550, 9550}, + {8419, 9557, 9557}, + {9383, 9561, 9562}, + {8970, 9575, 9578}, + {8911, 9582, 9582}, + {7828, 9595, 9596}, + {6180, 9600, 9600}, + {8738, 9604, 9607}, + {7540, 9611, 9612}, + {9599, 9616, 9618}, + {9187, 9623, 9623}, + {9294, 9628, 9629}, + {4536, 9639, 9639}, + {3867, 9643, 9643}, + {6305, 9648, 9648}, + {1617, 9654, 9657}, + {5762, 9666, 9666}, + {8314, 9670, 9670}, + {9666, 9674, 9675}, + {9506, 9679, 9679}, + {9669, 9685, 9686}, + {9683, 9690, 9690}, + {8763, 9697, 9698}, + {7468, 9702, 9702}, + {460, 9707, 9707}, + {3115, 9712, 9712}, + {9424, 9716, 9717}, + {7359, 9721, 9724}, + {7547, 9728, 9729}, + {7151, 9733, 9738}, + {7627, 9742, 9742}, + {2822, 9747, 9747}, + {8247, 9751, 9753}, + {9550, 9758, 9758}, + {7585, 9762, 9763}, + {1002, 9767, 9767}, + {7168, 9772, 9773}, + {6941, 9777, 9780}, + {9728, 9784, 9786}, + {9770, 9792, 9796}, + {6411, 9801, 9802}, + {3689, 9806, 9808}, + {9575, 9814, 9816}, + {7025, 9820, 9821}, + {2776, 9826, 9826}, + {9806, 9830, 9830}, + {9820, 9834, 9835}, + {9800, 9839, 9847}, + {9834, 9851, 9852}, + {9829, 9856, 9862}, + {1400, 9866, 9866}, + {3197, 9870, 9871}, + {9851, 9875, 9876}, + {9742, 9883, 9884}, + {3362, 9888, 9889}, + {9883, 9893, 9893}, + {5711, 9899, 9910}, + {7806, 9915, 9915}, + {9120, 9919, 9919}, + {9715, 9925, 9934}, + {2580, 9938, 9938}, + {4907, 9942, 9944}, + {6239, 9953, 9954}, + {6961, 9963, 9963}, + {5295, 9967, 9968}, + {1915, 9972, 9973}, + {3426, 9983, 9985}, + {9875, 9994, 9995}, + {6942, 9999, 9999}, + {6621, 10005, 10005}, + {7589, 10010, 10012}, + {9286, 10020, 10020}, + {838, 10024, 10024}, + {9980, 10028, 10031}, + {9994, 10035, 10041}, + {2702, 10048, 10051}, + {2621, 10059, 10059}, + {10054, 10065, 10065}, + {8612, 10073, 10074}, + {7033, 10078, 10078}, + {916, 10082, 10082}, + {10035, 10086, 10087}, + {8613, 10097, 10097}, + {9919, 10107, 10108}, + {6133, 10114, 10115}, + {10059, 10119, 10119}, + {10065, 10126, 10127}, + {7732, 10131, 10131}, + {7155, 10135, 10136}, + {6728, 10140, 10140}, + 
{6162, 10144, 10145}, + {4724, 10150, 10150}, + {1665, 10154, 10154}, + {10126, 10163, 10163}, + {9783, 10168, 10168}, + {1715, 10172, 10173}, + {7152, 10177, 10182}, + {8760, 10187, 10187}, + {7829, 10191, 10191}, + {9679, 10196, 10196}, + {9369, 10201, 10201}, + {2928, 10206, 10208}, + {6951, 10214, 10217}, + {5633, 10221, 10221}, + {7199, 10225, 10225}, + {10118, 10230, 10231}, + {9999, 10235, 10236}, + {10045, 10240, 10249}, + {5565, 10256, 10256}, + {9866, 10261, 10261}, + {10163, 10268, 10268}, + {9869, 10272, 10272}, + {9789, 10276, 10283}, + {10235, 10287, 10288}, + {10214, 10298, 10299}, + {6971, 10303, 10303}, + {3346, 10307, 10307}, + {10185, 10311, 10312}, + {9993, 10318, 10320}, + {2779, 10332, 10334}, + {1726, 10338, 10338}, + {741, 10354, 10360}, + {10230, 10372, 10373}, + {10260, 10384, 10385}, + {10131, 10389, 10398}, + {6946, 10406, 10409}, + {10158, 10413, 10420}, + {10123, 10424, 10424}, + {6157, 10428, 10429}, + {4518, 10434, 10434}, + {9893, 10438, 10438}, + {9865, 10442, 10446}, + {7558, 10454, 10454}, + {10434, 10460, 10460}, + {10064, 10466, 10468}, + {2703, 10472, 10474}, + {9751, 10478, 10479}, + {6714, 10485, 10485}, + {8020, 10490, 10490}, + {10303, 10494, 10494}, + {3521, 10499, 10500}, + {9281, 10513, 10515}, + {6028, 10519, 10523}, + {9387, 10527, 10527}, + {7614, 10531, 10531}, + {3611, 10536, 10536}, + {9162, 10540, 10540}, + {10081, 10546, 10547}, + {10034, 10560, 10562}, + {6726, 10567, 10571}, + {8237, 10575, 10575}, + {10438, 10579, 10583}, + {10140, 10587, 10587}, + {5784, 10592, 10592}, + {9819, 10597, 10600}, + {10567, 10604, 10608}, + {9335, 10613, 10613}, + {8300, 10617, 10617}, + {10575, 10621, 10621}, + {9678, 10625, 10626}, + {9962, 10632, 10633}, + {10535, 10637, 10638}, + {8199, 10642, 10642}, + {10372, 10647, 10648}, + {10637, 10656, 10657}, + {10579, 10667, 10668}, + {10465, 10677, 10680}, + {6702, 10684, 10685}, + {10073, 10691, 10692}, + {4505, 10696, 10697}, + {9042, 10701, 10701}, + {6460, 10705, 10706}, + {10010, 10714, 10716}, + {10656, 10720, 10722}, + {7282, 10727, 10729}, + {2327, 10733, 10733}, + {2491, 10740, 10741}, + {10704, 10748, 10750}, + {6465, 10754, 10754}, + {10647, 10758, 10759}, + {10424, 10763, 10763}, + {10748, 10776, 10776}, + {10546, 10780, 10781}, + {10758, 10785, 10786}, + {10287, 10790, 10797}, + {10785, 10801, 10807}, + {10240, 10811, 10826}, + {9509, 10830, 10830}, + {2579, 10836, 10838}, + {9801, 10843, 10845}, + {7555, 10849, 10850}, + {10776, 10860, 10865}, + {8023, 10869, 10869}, + {10046, 10876, 10884}, + {10253, 10888, 10892}, + {9941, 10897, 10897}, + {7898, 10901, 10905}, + {6725, 10909, 10913}, + {10757, 10921, 10923}, + {10160, 10931, 10931}, + {10916, 10935, 10942}, + {10261, 10946, 10946}, + {10318, 10952, 10954}, + {5911, 10959, 10961}, + {10801, 10965, 10966}, + {10946, 10970, 10977}, + {10592, 10982, 10984}, + {9913, 10988, 10990}, + {8510, 10994, 10996}, + {9419, 11000, 11001}, + {6765, 11006, 11007}, + {10725, 11011, 11011}, + {5537, 11017, 11019}, + {9208, 11024, 11025}, + {5850, 11030, 11030}, + {9610, 11034, 11036}, + {8846, 11041, 11047}, + {9697, 11051, 11051}, + {1622, 11055, 11058}, + {2370, 11062, 11062}, + {8393, 11067, 11067}, + {9756, 11071, 11071}, + {10172, 11076, 11076}, + {27, 11081, 11081}, + {7357, 11087, 11092}, + {8151, 11104, 11106}, + {6115, 11110, 11110}, + {10667, 11114, 11115}, + {11099, 11121, 11123}, + {10705, 11127, 11127}, + {8938, 11131, 11131}, + {11114, 11135, 11136}, + {1390, 11140, 11141}, + {10964, 11146, 11148}, + {11140, 11152, 11155}, + {9813, 11159, 
11166}, + {624, 11171, 11172}, + {3118, 11177, 11179}, + {11029, 11184, 11186}, + {10186, 11190, 11190}, + {10306, 11196, 11196}, + {8665, 11201, 11201}, + {7382, 11205, 11205}, + {1100, 11210, 11210}, + {2337, 11216, 11217}, + {1609, 11221, 11223}, + {5763, 11228, 11229}, + {5220, 11233, 11233}, + {11061, 11241, 11241}, + {10617, 11246, 11246}, + {11190, 11250, 11251}, + {10144, 11255, 11256}, + {11232, 11260, 11260}, + {857, 11264, 11265}, + {10994, 11269, 11271}, + {3879, 11280, 11281}, + {11184, 11287, 11289}, + {9611, 11293, 11295}, + {11250, 11299, 11299}, + {4495, 11304, 11304}, + {7574, 11308, 11309}, + {9814, 11315, 11317}, + {1713, 11321, 11324}, + {1905, 11328, 11328}, + {8745, 11335, 11340}, + {8883, 11351, 11351}, + {8119, 11358, 11358}, + {1842, 11363, 11364}, + {11237, 11368, 11368}, + {8814, 11373, 11374}, + {5684, 11378, 11378}, + {11011, 11382, 11382}, + {6520, 11389, 11389}, + {11183, 11393, 11396}, + {1790, 11404, 11404}, + {9536, 11408, 11408}, + {11298, 11418, 11419}, + {3929, 11425, 11425}, + {5588, 11429, 11429}, + {8476, 11436, 11436}, + {4096, 11440, 11442}, + {11084, 11446, 11454}, + {10603, 11458, 11463}, + {7332, 11472, 11474}, + {7611, 11483, 11486}, + {4836, 11490, 11491}, + {10024, 11495, 11495}, + {4917, 11501, 11506}, + {6486, 11510, 11512}, + {11269, 11516, 11518}, + {3603, 11522, 11525}, + {11126, 11535, 11535}, + {11418, 11539, 11541}, + {11408, 11545, 11545}, + {9021, 11549, 11552}, + {6745, 11557, 11557}, + {5118, 11561, 11564}, + {7590, 11568, 11569}, + {4426, 11573, 11578}, + {9790, 11582, 11583}, + {6447, 11587, 11587}, + {10229, 11591, 11594}, + {10457, 11598, 11598}, + {10168, 11604, 11604}, + {10543, 11608, 11608}, + {7404, 11612, 11612}, + {11127, 11616, 11616}, + {3337, 11620, 11620}, + {11501, 11624, 11628}, + {4543, 11633, 11635}, + {8449, 11642, 11642}, + {4943, 11646, 11648}, + {10526, 11652, 11654}, + {11620, 11659, 11659}, + {8927, 11664, 11669}, + {532, 11673, 11673}, + {10513, 11677, 11679}, + {10428, 11683, 11683}, + {10999, 11689, 11690}, + {9469, 11695, 11695}, + {3606, 11699, 11699}, + {9560, 11708, 11709}, + {1564, 11714, 11714}, + {10527, 11718, 11718}, + {3071, 11723, 11726}, + {11590, 11731, 11732}, + {6605, 11737, 11737}, + {11624, 11741, 11745}, + {7822, 11749, 11752}, + {5269, 11757, 11758}, + {1339, 11767, 11767}, + {1363, 11771, 11773}, + {3704, 11777, 11777}, + {10952, 11781, 11783}, + {6764, 11793, 11795}, + {8675, 11800, 11800}, + {9963, 11804, 11804}, + {11573, 11808, 11809}, + {9548, 11813, 11813}, + {11591, 11817, 11818}, + {11446, 11822, 11822}, + {9224, 11828, 11828}, + {3158, 11836, 11836}, + {10830, 11840, 11840}, + {7234, 11846, 11846}, + {11299, 11850, 11850}, + {11544, 11854, 11855}, + {11498, 11859, 11859}, + {10993, 11865, 11868}, + {9720, 11872, 11878}, + {10489, 11882, 11890}, + {11712, 11898, 11904}, + {11516, 11908, 11910}, + {11568, 11914, 11915}, + {10177, 11919, 11924}, + {11363, 11928, 11929}, + {10494, 11933, 11933}, + {9870, 11937, 11938}, + {9427, 11942, 11942}, + {11481, 11949, 11949}, + {6030, 11955, 11957}, + {11718, 11961, 11961}, + {10531, 11965, 11983}, + {5126, 11987, 11987}, + {7515, 11991, 11991}, + {10646, 11996, 11997}, + {2947, 12001, 12001}, + {9582, 12009, 12010}, + {6202, 12017, 12018}, + {11714, 12022, 12022}, + {9235, 12033, 12037}, + {9721, 12041, 12044}, + {11932, 12051, 12052}, + {12040, 12056, 12056}, + {12051, 12060, 12060}, + {11601, 12066, 12066}, + {8426, 12070, 12070}, + {4053, 12077, 12077}, + {4262, 12081, 12081}, + {9761, 12086, 12088}, + {11582, 12092, 12093}, + 
{10965, 12097, 12098}, + {11803, 12103, 12104}, + {11933, 12108, 12109}, + {10688, 12117, 12117}, + {12107, 12125, 12126}, + {6774, 12130, 12132}, + {6286, 12137, 12137}, + {9543, 12141, 12141}, + {12097, 12145, 12146}, + {10790, 12150, 12150}, + {10125, 12154, 12156}, + {12125, 12164, 12164}, + {12064, 12168, 12172}, + {10811, 12178, 12188}, + {12092, 12192, 12193}, + {10058, 12197, 12198}, + {11611, 12211, 12212}, + {3459, 12216, 12216}, + {10291, 12225, 12228}, + {12191, 12232, 12234}, + {12145, 12238, 12238}, + {12001, 12242, 12250}, + {3840, 12255, 12255}, + {12216, 12259, 12259}, + {674, 12272, 12272}, + {12141, 12276, 12276}, + {10766, 12280, 12280}, + {11545, 12284, 12284}, + {6496, 12290, 12290}, + {11381, 12294, 12295}, + {603, 12302, 12303}, + {12276, 12308, 12308}, + {11850, 12313, 12314}, + {565, 12319, 12319}, + {9351, 12324, 12324}, + {11822, 12328, 12328}, + {2691, 12333, 12334}, + {11840, 12338, 12338}, + {11070, 12343, 12343}, + {9510, 12347, 12347}, + {11024, 12352, 12353}, + {7173, 12359, 12359}, + {517, 12363, 12363}, + {6311, 12367, 12368}, + {11367, 12372, 12373}, + {12008, 12377, 12377}, + {11372, 12382, 12384}, + {11358, 12391, 12392}, + {11382, 12396, 12396}, + {6882, 12400, 12401}, + {11246, 12405, 12405}, + {8359, 12409, 12412}, + {10154, 12418, 12418}, + {12016, 12425, 12426}, + {8972, 12434, 12435}, + {10478, 12439, 12440}, + {12395, 12449, 12449}, + {11612, 12454, 12454}, + {12347, 12458, 12458}, + {10700, 12466, 12467}, + {3637, 12471, 12476}, + {1042, 12480, 12481}, + {6747, 12488, 12488}, + {12396, 12492, 12493}, + {9420, 12497, 12497}, + {11285, 12501, 12510}, + {4470, 12515, 12515}, + {9374, 12519, 12519}, + {11293, 12528, 12528}, + {2058, 12534, 12535}, + {6521, 12539, 12539}, + {12492, 12543, 12543}, + {3043, 12547, 12547}, + {2982, 12551, 12553}, + {11030, 12557, 12563}, + {7636, 12568, 12568}, + {9639, 12572, 12572}, + {12543, 12576, 12576}, + {5989, 12580, 12583}, + {11051, 12587, 12587}, + {1061, 12592, 12594}, + {12313, 12599, 12601}, + {11846, 12605, 12605}, + {12576, 12609, 12609}, + {11040, 12618, 12625}, + {12479, 12629, 12629}, + {6903, 12633, 12633}, + {12322, 12639, 12639}, + {12253, 12643, 12645}, + {5594, 12651, 12651}, + {12522, 12655, 12655}, + {11703, 12659, 12659}, + {1377, 12665, 12665}, + {8022, 12669, 12669}, + {12280, 12674, 12674}, + {9023, 12680, 12681}, + {12328, 12685, 12685}, + {3085, 12689, 12693}, + {4700, 12698, 12698}, + {10224, 12702, 12702}, + {8781, 12706, 12706}, + {1651, 12710, 12710}, + {12458, 12714, 12714}, + {12005, 12718, 12721}, + {11908, 12725, 12726}, + {8202, 12733, 12733}, + {11708, 12739, 12740}, + {12599, 12744, 12745}, + {12284, 12749, 12749}, + {5285, 12756, 12756}, + {12055, 12775, 12777}, + {6919, 12782, 12782}, + {12242, 12786, 12786}, + {12009, 12790, 12790}, + {9628, 12794, 12796}, + {11354, 12801, 12802}, + {10225, 12806, 12807}, + {579, 12813, 12813}, + {8935, 12817, 12822}, + {8753, 12827, 12829}, + {11006, 12835, 12835}, + {858, 12841, 12845}, + {476, 12849, 12849}, + {7667, 12854, 12854}, + {12760, 12860, 12871}, + {11677, 12875, 12877}, + {12714, 12881, 12881}, + {12731, 12885, 12890}, + {7108, 12894, 12896}, + {1165, 12900, 12900}, + {4021, 12906, 12906}, + {10829, 12910, 12911}, + {12331, 12915, 12915}, + {8887, 12919, 12921}, + {11639, 12925, 12925}, + {7964, 12929, 12929}, + {12528, 12937, 12937}, + {8148, 12941, 12941}, + {12770, 12948, 12950}, + {12609, 12954, 12954}, + {12685, 12958, 12958}, + {2803, 12962, 12962}, + {9561, 12966, 12966}, + {6671, 12972, 12973}, + {12056, 12977, 
12977}, + {6380, 12981, 12981}, + {12048, 12985, 12985}, + {11961, 12989, 12993}, + {3368, 12997, 12999}, + {6634, 13004, 13004}, + {6775, 13009, 13010}, + {12136, 13014, 13019}, + {10341, 13023, 13023}, + {13002, 13027, 13027}, + {10587, 13031, 13031}, + {10307, 13035, 13035}, + {12736, 13039, 13039}, + {12744, 13043, 13044}, + {6175, 13048, 13048}, + {9702, 13053, 13054}, + {662, 13059, 13061}, + {12718, 13065, 13068}, + {12893, 13072, 13075}, + {8299, 13086, 13091}, + {12604, 13095, 13096}, + {12848, 13100, 13101}, + {12749, 13105, 13105}, + {12526, 13109, 13114}, + {9173, 13122, 13122}, + {12769, 13128, 13128}, + {13038, 13132, 13132}, + {12725, 13136, 13137}, + {12639, 13146, 13146}, + {9711, 13150, 13151}, + {12137, 13155, 13155}, + {13039, 13159, 13159}, + {4681, 13163, 13164}, + {12954, 13168, 13168}, + {13158, 13175, 13176}, + {13105, 13180, 13180}, + {10754, 13184, 13184}, + {13167, 13188, 13188}, + {12658, 13192, 13192}, + {4294, 13199, 13200}, + {11682, 13204, 13205}, + {11695, 13209, 13209}, + {11076, 13214, 13214}, + {12232, 13218, 13218}, + {9399, 13223, 13224}, + {12880, 13228, 13229}, + {13048, 13234, 13234}, + {9701, 13238, 13239}, + {13209, 13243, 13243}, + {3658, 13248, 13248}, + {3698, 13252, 13254}, + {12237, 13260, 13260}, + {8872, 13266, 13266}, + {12957, 13272, 13273}, + {1393, 13281, 13281}, + {2013, 13285, 13288}, + {4244, 13296, 13299}, + {9428, 13303, 13303}, + {12702, 13307, 13307}, + {13078, 13311, 13311}, + {6071, 13315, 13315}, + {3061, 13319, 13319}, + {2051, 13324, 13324}, + {11560, 13328, 13331}, + {6584, 13336, 13336}, + {8482, 13340, 13340}, + {5331, 13344, 13344}, + {4171, 13348, 13348}, + {8501, 13352, 13352}, + {9219, 13356, 13356}, + {9473, 13360, 13363}, + {12881, 13367, 13367}, + {13065, 13371, 13375}, + {2979, 13379, 13384}, + {1518, 13388, 13388}, + {11177, 13392, 13392}, + {9457, 13398, 13398}, + {12293, 13407, 13410}, + {3697, 13414, 13417}, + {10338, 13425, 13425}, + {13367, 13429, 13429}, + {11074, 13433, 13437}, + {4201, 13441, 13443}, + {1812, 13447, 13448}, + {13360, 13452, 13456}, + {13188, 13463, 13463}, + {9732, 13470, 13470}, + {11332, 13477, 13477}, + {9918, 13487, 13487}, + {6337, 13497, 13497}, + {13429, 13501, 13501}, + {11413, 13505, 13505}, + {4685, 13512, 13513}, + {13136, 13517, 13519}, + {7416, 13528, 13530}, + {12929, 13534, 13534}, + {11110, 13539, 13539}, + {11521, 13543, 13543}, + {12825, 13553, 13553}, + {13447, 13557, 13558}, + {12299, 13562, 13563}, + {9003, 13570, 13570}, + {12500, 13577, 13577}, + {13501, 13581, 13581}, + {9392, 13586, 13586}, + {12454, 13590, 13590}, + {6189, 13595, 13595}, + {13053, 13599, 13599}, + {11881, 13604, 13604}, + {13159, 13608, 13608}, + {4894, 13612, 13612}, + {13221, 13621, 13621}, + {8950, 13625, 13625}, + {13533, 13629, 13629}, + {9633, 13633, 13633}, + {7892, 13637, 13639}, + {13581, 13643, 13643}, + {13616, 13647, 13649}, + {12794, 13653, 13654}, + {8919, 13659, 13659}, + {9674, 13663, 13663}, + {13577, 13668, 13668}, + {12966, 13672, 13672}, + {12659, 13676, 13683}, + {6124, 13688, 13688}, + {9225, 13693, 13695}, + {11833, 13702, 13702}, + {12904, 13709, 13717}, + {13647, 13721, 13722}, + {11687, 13726, 13727}, + {12434, 13731, 13732}, + {12689, 13736, 13742}, + {13168, 13746, 13746}, + {6151, 13751, 13752}, + {11821, 13756, 13757}, + {6467, 13764, 13764}, + {5730, 13769, 13769}, + {5136, 13780, 13780}, + {724, 13784, 13785}, + {13517, 13789, 13791}, + {640, 13795, 13796}, + {7721, 13800, 13802}, + {11121, 13806, 13807}, + {5791, 13811, 13815}, + {12894, 13819, 13819}, + {11100, 
13824, 13824}, + {7011, 13830, 13830}, + {7129, 13834, 13837}, + {13833, 13841, 13841}, + {11276, 13847, 13847}, + {13621, 13853, 13853}, + {13589, 13862, 13863}, + {12989, 13867, 13867}, + {12789, 13871, 13871}, + {1239, 13875, 13875}, + {4675, 13879, 13881}, + {4686, 13885, 13885}, + {707, 13889, 13889}, + {5449, 13897, 13898}, + {13867, 13902, 13903}, + {10613, 13908, 13908}, + {13789, 13912, 13914}, + {4451, 13918, 13919}, + {9200, 13924, 13924}, + {2011, 13930, 13930}, + {11433, 13934, 13936}, + {4695, 13942, 13943}, + {9435, 13948, 13951}, + {13688, 13955, 13957}, + {11694, 13961, 13962}, + {5712, 13966, 13966}, + {5991, 13970, 13972}, + {13477, 13976, 13976}, + {10213, 13987, 13987}, + {11839, 13991, 13993}, + {12272, 13997, 13997}, + {6206, 14001, 14001}, + {13179, 14006, 14007}, + {2939, 14011, 14011}, + {12972, 14016, 14017}, + {13918, 14021, 14022}, + {7436, 14026, 14027}, + {7678, 14032, 14034}, + {13586, 14040, 14040}, + {13347, 14044, 14044}, + {13109, 14048, 14051}, + {9244, 14055, 14057}, + {13315, 14061, 14061}, + {13276, 14067, 14067}, + {11435, 14073, 14074}, + {13853, 14078, 14078}, + {13452, 14082, 14082}, + {14044, 14087, 14087}, + {4440, 14091, 14095}, + {4479, 14100, 14103}, + {9395, 14107, 14109}, + {6834, 14119, 14119}, + {10458, 14123, 14124}, + {1429, 14129, 14129}, + {8443, 14135, 14135}, + {10365, 14140, 14140}, + {5267, 14145, 14145}, + {11834, 14151, 14153}, +} diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd new file mode 100644 index 00000000000..d24eb4b47c3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/runbench.cmd @@ -0,0 +1,2 @@ +del old.txt +go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 00000000000..c7f445f62c6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at https://github.com/google/snappy +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. 
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/snappy/snappy_test.go b/vendor/github.com/klauspost/compress/snappy/snappy_test.go new file mode 100644 index 00000000000..2712710df58 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy_test.go @@ -0,0 +1,1353 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +var ( + download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") + benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") +) + +// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by +// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on +// this GOARCH. There is more than one valid encoding of any given input, and +// there is more than one good algorithm along the frontier of trading off +// throughput for output size. Nonetheless, we presume that the C++ encoder's +// algorithm is a good one and has been tested on a wide range of inputs, so +// matching that exactly should mean that the Go encoder's algorithm is also +// good, without needing to gather our own corpus of test data. 
+// +// The exact algorithm used by the C++ code is potentially endian dependent, as +// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes +// at a time. The Go implementation is endian agnostic, in that its output is +// the same (as little-endian C++ code), regardless of the CPU's endianness. +// +// Thus, when comparing Go's output to C++ output generated beforehand, such as +// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- +// endian system, we can run that test regardless of the runtime.GOARCH value. +// +// When comparing Go's output to dynamically generated C++ output, i.e. the +// result of fork/exec'ing a C++ program, we can run that test only on +// little-endian systems, because the C++ output might be different on +// big-endian systems. The runtime package doesn't export endianness per se, +// but we can restrict this match-C++ test to common little-endian systems. +const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" + +func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { + got := maxEncodedLenOfMaxBlockSize + want := MaxEncodedLen(maxBlockSize) + if got != want { + t.Fatalf("got %d, want %d", got, want) + } +} + +func cmp(a, b []byte) error { + if bytes.Equal(a, b) { + return nil + } + if len(a) != len(b) { + return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) + } + for i := range a { + if a[i] != b[i] { + return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) + } + } + return nil +} + +func roundtrip(b, ebuf, dbuf []byte) error { + d, err := Decode(dbuf, Encode(ebuf, b)) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if err := cmp(d, b); err != nil { + return fmt.Errorf("roundtrip mismatch: %v", err) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rng := rand.New(rand.NewSource(1)) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(rng.Intn(256)) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestInvalidVarint(t *testing.T) { + testCases := []struct { + desc string + input string + }{{ + "invalid varint, final byte has continuation bit set", + "\xff", + }, { + "invalid varint, value overflows uint64", + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", + }, { + // https://github.com/google/snappy/blob/master/format_description.txt + // says that "the stream starts with the uncompressed length [as a + // varint] (up to a maximum of 2^32 - 1)". 
+ "valid varint (as uint64), but value overflows uint32", + "\x80\x80\x80\x80\x10", + }} + + for _, tc := range testCases { + input := []byte(tc.input) + if _, err := DecodedLen(input); err != ErrCorrupt { + t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) + } + if _, err := Decode(nil, input); err != ErrCorrupt { + t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) + } + } +} + +func TestDecode(t *testing.T) { + lit40Bytes := make([]byte, 40) + for i := range lit40Bytes { + lit40Bytes[i] = byte(i) + } + lit40 := string(lit40Bytes) + + testCases := []struct { + desc string + input string + want string + wantErr error + }{{ + `decodedLen=0; valid input`, + "\x00", + "", + nil, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, + "\x03" + "\x08\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, + "\x02" + "\x08\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, + "\x03" + "\x08\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, + "\x28" + "\x9c" + lit40, + lit40, + nil, + }, { + `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, + "\x01" + "\xf0", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, + "\x03" + "\xf0\x02\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, + "\x01" + "\xf4\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`, + "\x03" + "\xf4\x02\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, + "\x01" + "\xf8\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, + "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, + "\x01" + "\xfc\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, + "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, + "\x04" + "\xfc\x02\x00\x00\x00\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, + "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, + "\x04" + "\x01", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x02\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x03\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, + "\x04" + "\x0cabcd", + "abcd", + nil, + }, { + `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, + "\x0d" + "\x0cabcd" + "\x15\x04", + "abcdabcdabcda", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, + "\x08" + "\x0cabcd" + "\x01\x04", + "abcdabcd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, + "\x08" + "\x0cabcd" + "\x01\x02", + "abcdcdcd", + nil, + }, { + 
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`, + "\x08" + "\x0cabcd" + "\x01\x01", + "abcddddd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, + "\x08" + "\x0cabcd" + "\x01\x00", + "", + ErrCorrupt, + }, { + `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, + "\x09" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, + "\x08" + "\x0cabcd" + "\x01\x05", + "", + ErrCorrupt, + }, { + `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, + "\x07" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x06\x03\x00", + "abcdbc", + nil, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", + "abcdbc", + nil, + }} + + const ( + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to dBuf + // to check that Decode does not write bytes past the end of + // dBuf[:dLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + + var dBuf [100]byte +loop: + for i, tc := range testCases { + input := []byte(tc.input) + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) + continue loop + } + } + + dLen, n := binary.Uvarint(input) + if n <= 0 { + t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) + continue + } + if dLen > uint64(len(dBuf)) { + t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) + continue + } + + for j := range dBuf { + dBuf[j] = byte(notPresentBase + j%notPresentLen) + } + g, gotErr := Decode(dBuf[:], input) + if got := string(g); got != tc.want || gotErr != tc.wantErr { + t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", + i, tc.desc, got, gotErr, tc.want, tc.wantErr) + continue + } + for j, x := range dBuf { + if uint64(j) < dLen { + continue + } + if w := byte(notPresentBase + j%notPresentLen); x != w { + t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", + i, tc.desc, j, x, w, dBuf) + continue loop + } + } + } +} + +func TestDecodeCopy4(t *testing.T) { + dots := strings.Repeat(".", 65536) + + input := strings.Join([]string{ + "\x89\x80\x04", // decodedLen = 65545. + "\x0cpqrs", // 4-byte literal "pqrs". + "\xf4\xff\xff" + dots, // 65536-byte literal dots. + "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. + }, "") + + gotBytes, err := Decode(nil, []byte(input)) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + want := "pqrs" + dots + "pqrs." + if len(got) != len(want) { + t.Fatalf("got %d bytes, want %d", len(got), len(want)) + } + if got != want { + for i := 0; i < len(got); i++ { + if g, w := got[i], want[i]; g != w { + t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) + } + } + } +} + +// TestDecodeLengthOffset tests decoding an encoding of the form literal + +// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". +func TestDecodeLengthOffset(t *testing.T) { + const ( + prefix = "abcdefghijklmnopqr" + suffix = "ABCDEFGHIJKLMNOPQR" + + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to + // gotBuf to check that Decode does not write bytes past the end of + // gotBuf[:totalLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + var gotBuf, wantBuf, inputBuf [128]byte + for length := 1; length <= 18; length++ { + for offset := 1; offset <= 18; offset++ { + loop: + for suffixLen := 0; suffixLen <= 18; suffixLen++ { + totalLen := len(prefix) + length + suffixLen + + inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) + inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], prefix) + inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) + inputBuf[inputLen+1] = byte(offset) + inputBuf[inputLen+2] = 0x00 + inputLen += 3 + if suffixLen > 0 { + inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) + } + input := inputBuf[:inputLen] + + for i := range gotBuf { + gotBuf[i] = byte(notPresentBase + i%notPresentLen) + } + got, err := Decode(gotBuf[:], input) + if err != nil { + t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) + continue + } + + wantLen := 0 + wantLen += copy(wantBuf[wantLen:], prefix) + for i := 0; i < length; i++ { + wantBuf[wantLen] = wantBuf[wantLen-offset] + wantLen++ + } + wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) + want := wantBuf[:wantLen] + + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", + length, offset, suffixLen, x, input) + continue loop + } + } + for i, x := range gotBuf { + if i < totalLen { + continue + } + if w := byte(notPresentBase + i%notPresentLen); x != w { + t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ + "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", + length, offset, suffixLen, totalLen, i, x, w, gotBuf) + continue loop + } + } + for _, x := range want { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", + length, offset, suffixLen, x, want) + continue loop + } + } + + if !bytes.Equal(got, want) { + t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", + length, offset, suffixLen, input, got, want) + continue + } + } + } + } +} + +const ( + goldenText = "Mark.Twain-Tom.Sawyer.txt" + goldenCompressed = goldenText + ".rawsnappy" +) + +func TestDecodeGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got, err := Decode(nil, src) + if err != nil { + t.Fatalf("Decode: %v", err) + } + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestEncodeGoldenInput(t *testing.T) { + tDir := 
filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got := Encode(nil, src) + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestExtendMatchGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + for i, tc := range extendMatchGoldenTestCases { + got := extendMatch(src, tc.i, tc.j) + if got != tc.want { + t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", + i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) + } + } +} + +func TestExtendMatch(t *testing.T) { + // ref is a simple, reference implementation of extendMatch. + ref := func(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j + } + + nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} + for yIndex := 40; yIndex > 30; yIndex-- { + xxx := bytes.Repeat([]byte("x"), 40) + if yIndex < len(xxx) { + xxx[yIndex] = 'y' + } + for _, i := range nums { + for _, j := range nums { + if i >= j { + continue + } + got := extendMatch(xxx, i, j) + want := ref(xxx, i, j) + if got != want { + t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) + } + } + } + } +} + +const snappytoolCmdName = "cmd/snappytool/snappytool" + +func skipTestSameEncodingAsCpp() (msg string) { + if !goEncoderShouldMatchCppEncoder { + return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) + } + if _, err := os.Stat(snappytoolCmdName); err != nil { + return fmt.Sprintf("could not find snappytool: %v", err) + } + return "" +} + +func runTestSameEncodingAsCpp(src []byte) error { + got := Encode(nil, src) + + cmd := exec.Command(snappytoolCmdName, "-e") + cmd.Stdin = bytes.NewReader(src) + want, err := cmd.Output() + if err != nil { + return fmt.Errorf("could not run snappytool: %v", err) + } + return cmp(got, want) +} + +func TestSameEncodingAsCppShortCopies(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + src := bytes.Repeat([]byte{'a'}, 20) + for i := 0; i <= len(src); i++ { + if err := runTestSameEncodingAsCpp(src[:i]); err != nil { + t.Errorf("i=%d: %v", i, err) + } + } +} + +func TestSameEncodingAsCppLongFiles(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + bDir := filepath.FromSlash(*benchdataDir) + failed := false + for i, tf := range testFiles { + if err := downloadBenchmarkFiles(t, tf.filename); err != nil { + t.Fatalf("failed to download testdata: %s", err) + } + data := readFile(t, filepath.Join(bDir, tf.filename)) + if n := tf.sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if err := runTestSameEncodingAsCpp(data); err != nil { + t.Errorf("i=%d: %v", i, err) + failed = true + } + } + if failed { + t.Errorf("was the snappytool program built against the C++ snappy library version " + + "d53de187 or later, commited on 2016-04-05? See " + + "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") + } +} + +// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm +// described in decode_amd64.s and its claim of a 10 byte overrun worst case. 
+func TestSlowForwardCopyOverrun(t *testing.T) { + const base = 100 + + for length := 1; length < 18; length++ { + for offset := 1; offset < 18; offset++ { + highWaterMark := base + d := base + l := length + o := offset + + // makeOffsetAtLeast8 + for o < 8 { + if end := d + 8; highWaterMark < end { + highWaterMark = end + } + l -= o + d += o + o += o + } + + // fixUpSlowForwardCopy + a := d + d += l + + // finishSlowForwardCopy + for l > 0 { + if end := a + 8; highWaterMark < end { + highWaterMark = end + } + a += 8 + l -= 8 + } + + dWant := base + length + overrun := highWaterMark - dWant + if d != dWant || overrun < 0 || 10 < overrun { + t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", + length, offset, d, overrun, dWant) + } + } + } +} + +// TestEncodeNoiseThenRepeats encodes input for which the first half is very +// incompressible and the second half is very compressible. The encoded form's +// length should be closer to 50% of the original length than 100%. +func TestEncodeNoiseThenRepeats(t *testing.T) { + for _, origLen := range []int{256 * 1024, 2048 * 1024} { + src := make([]byte, origLen) + rng := rand.New(rand.NewSource(1)) + firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] + for i := range firstHalf { + firstHalf[i] = uint8(rng.Intn(256)) + } + for i := range secondHalf { + secondHalf[i] = uint8(i >> 8) + } + dst := Encode(nil, src) + if got, want := len(dst), origLen*3/4; got >= want { + t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) + } + } +} + +func TestFramingFormat(t *testing.T) { + // src is comprised of alternating 1e5-sized sequences of random + // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen + // because it is larger than maxBlockSize (64k). + src := make([]byte, 1e6) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 10; i++ { + if i%2 == 0 { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(rng.Intn(256)) + } + } else { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(i) + } + } + } + + buf := new(bytes.Buffer) + if _, err := NewWriter(buf).Write(src); err != nil { + t.Fatalf("Write: encoding: %v", err) + } + dst, err := ioutil.ReadAll(NewReader(buf)) + if err != nil { + t.Fatalf("ReadAll: decoding: %v", err) + } + if err := cmp(dst, src); err != nil { + t.Fatal(err) + } +} + +func TestWriterGoldenOutput(t *testing.T) { + buf := new(bytes.Buffer) + w := NewBufferedWriter(buf) + defer w.Close() + w.Write([]byte("abcd")) // Not compressible. + w.Flush() + w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. + w.Flush() + // The next chunk is also compressible, but a naive, greedy encoding of the + // overall length 67 copy as a length 64 copy (the longest expressible as a + // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte + // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 + // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 + // (of length 60) and a 2-byte tagCopy1 (of length 7). + w.Write(bytes.Repeat([]byte{'B'}, 68)) + w.Write([]byte("efC")) // Not compressible. + w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. + w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. + w.Write([]byte("g")) // Not compressible. + w.Flush() + + got := buf.String() + want := strings.Join([]string{ + magicChunk, + "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). + "\x68\x10\xe6\xb6", // Checksum. 
+ "\x61\x62\x63\x64", // Uncompressed payload: "abcd". + "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). + "\x5f\xeb\xf2\x10", // Checksum. + "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. + "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. + "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). + "\x30\x85\x69\xeb", // Checksum. + "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. + "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". + "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. + "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. + "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". + "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. + "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. + "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". + }, "") + if got != want { + t.Fatalf("\ngot: % x\nwant: % x", got, want) + } +} + +func TestEmitLiteral(t *testing.T) { + testCases := []struct { + length int + want string + }{ + {1, "\x00"}, + {2, "\x04"}, + {59, "\xe8"}, + {60, "\xec"}, + {61, "\xf0\x3c"}, + {62, "\xf0\x3d"}, + {254, "\xf0\xfd"}, + {255, "\xf0\xfe"}, + {256, "\xf0\xff"}, + {257, "\xf4\x00\x01"}, + {65534, "\xf4\xfd\xff"}, + {65535, "\xf4\xfe\xff"}, + {65536, "\xf4\xff\xff"}, + } + + dst := make([]byte, 70000) + nines := bytes.Repeat([]byte{0x99}, 65536) + for _, tc := range testCases { + lit := nines[:tc.length] + n := emitLiteral(dst, lit) + if !bytes.HasSuffix(dst[:n], lit) { + t.Errorf("length=%d: did not end with that many literal bytes", tc.length) + continue + } + got := string(dst[:n-tc.length]) + if got != tc.want { + t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) + continue + } + } +} + +func TestEmitCopy(t *testing.T) { + testCases := []struct { + offset int + length int + want string + }{ + {8, 04, "\x01\x08"}, + {8, 11, "\x1d\x08"}, + {8, 12, "\x2e\x08\x00"}, + {8, 13, "\x32\x08\x00"}, + {8, 59, "\xea\x08\x00"}, + {8, 60, "\xee\x08\x00"}, + {8, 61, "\xf2\x08\x00"}, + {8, 62, "\xf6\x08\x00"}, + {8, 63, "\xfa\x08\x00"}, + {8, 64, "\xfe\x08\x00"}, + {8, 65, "\xee\x08\x00\x05\x08"}, + {8, 66, "\xee\x08\x00\x09\x08"}, + {8, 67, "\xee\x08\x00\x0d\x08"}, + {8, 68, "\xfe\x08\x00\x01\x08"}, + {8, 69, "\xfe\x08\x00\x05\x08"}, + {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, + + {256, 04, "\x21\x00"}, + {256, 11, "\x3d\x00"}, + {256, 12, "\x2e\x00\x01"}, + {256, 13, "\x32\x00\x01"}, + {256, 59, "\xea\x00\x01"}, + {256, 60, "\xee\x00\x01"}, + {256, 61, "\xf2\x00\x01"}, + {256, 62, "\xf6\x00\x01"}, + {256, 63, "\xfa\x00\x01"}, + {256, 64, "\xfe\x00\x01"}, + {256, 65, "\xee\x00\x01\x25\x00"}, + {256, 66, "\xee\x00\x01\x29\x00"}, + {256, 67, "\xee\x00\x01\x2d\x00"}, + {256, 68, "\xfe\x00\x01\x21\x00"}, + {256, 69, "\xfe\x00\x01\x25\x00"}, + {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, + + {2048, 04, "\x0e\x00\x08"}, + {2048, 11, "\x2a\x00\x08"}, + {2048, 12, "\x2e\x00\x08"}, + {2048, 13, "\x32\x00\x08"}, + {2048, 59, "\xea\x00\x08"}, + {2048, 60, "\xee\x00\x08"}, + {2048, 61, "\xf2\x00\x08"}, + {2048, 62, "\xf6\x00\x08"}, + {2048, 63, "\xfa\x00\x08"}, + {2048, 64, "\xfe\x00\x08"}, + {2048, 65, "\xee\x00\x08\x12\x00\x08"}, 
+ {2048, 66, "\xee\x00\x08\x16\x00\x08"}, + {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, + {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, + {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, + {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, + } + + dst := make([]byte, 1024) + for _, tc := range testCases { + n := emitCopy(dst, tc.offset, tc.length) + got := string(dst[:n]) + if got != tc.want { + t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) + } + } +} + +func TestNewBufferedWriter(t *testing.T) { + // Test all 32 possible sub-sequences of these 5 input slices. + // + // Their lengths sum to 400,000, which is over 6 times the Writer ibuf + // capacity: 6 * maxBlockSize is 393,216. + inputs := [][]byte{ + bytes.Repeat([]byte{'a'}, 40000), + bytes.Repeat([]byte{'b'}, 150000), + bytes.Repeat([]byte{'c'}, 60000), + bytes.Repeat([]byte{'d'}, 120000), + bytes.Repeat([]byte{'e'}, 30000), + } +loop: + for i := 0; i < 1< 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. + data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } +func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } +func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +func BenchmarkRandomEncode(b *testing.B) { + rng := rand.New(rand.NewSource(1)) + data := make([]byte, 1<<20) + for i := range data { + data[i] = uint8(rng.Intn(256)) + } + benchEncode(b, data) +} + +// testFiles' values are copied directly from +// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc +// The label field is unused in snappy-go. +var testFiles = []struct { + label string + filename string + sizeLimit int +}{ + {"html", "html", 0}, + {"urls", "urls.10K", 0}, + {"jpg", "fireworks.jpeg", 0}, + {"jpg_200", "fireworks.jpeg", 200}, + {"pdf", "paper-100k.pdf", 0}, + {"html4", "html_x_4", 0}, + {"txt1", "alice29.txt", 0}, + {"txt2", "asyoulik.txt", 0}, + {"txt3", "lcet10.txt", 0}, + {"txt4", "plrabn12.txt", 0}, + {"pb", "geo.protodata", 0}, + {"gaviota", "kppkn.gtb", 0}, +} + +const ( + // The benchmark data files are at this canonical URL. 
+ benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" +) + +func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { + bDir := filepath.FromSlash(*benchdataDir) + filename := filepath.Join(bDir, basename) + if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { + return nil + } + + if !*download { + b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create %s: %s", bDir, err) + } + + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + url := benchURL + basename + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("failed to download %s: %s", url, err) + } + defer resp.Body.Close() + if s := resp.StatusCode; s != http.StatusOK { + return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) + } + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) + } + return nil +} + +func benchFile(b *testing.B, i int, decode bool) { + if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + bDir := filepath.FromSlash(*benchdataDir) + data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) + if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } + +func BenchmarkExtendMatch(b *testing.B) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + b.Fatalf("ReadFile: %v", err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, tc := range extendMatchGoldenTestCases { + extendMatch(src, tc.i, tc.j) + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt new file mode 100644 index 00000000000..86a18750bfc --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt @@ -0,0 +1,396 @@ +Produced by David Widger. The previous edition was updated by Jose +Menendez. + + + + + + THE ADVENTURES OF TOM SAWYER + BY + MARK TWAIN + (Samuel Langhorne Clemens) + + + + + P R E F A C E + +MOST of the adventures recorded in this book really occurred; one or +two were experiences of my own, the rest those of boys who were +schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but +not from an individual--he is a combination of the characteristics of +three boys whom I knew, and therefore belongs to the composite order of +architecture. + +The odd superstitions touched upon were all prevalent among children +and slaves in the West at the period of this story--that is to say, +thirty or forty years ago. + +Although my book is intended mainly for the entertainment of boys and +girls, I hope it will not be shunned by men and women on that account, +for part of my plan has been to try to pleasantly remind adults of what +they once were themselves, and of how they felt and thought and talked, +and what queer enterprises they sometimes engaged in. + + THE AUTHOR. + +HARTFORD, 1876. + + + + T O M S A W Y E R + + + +CHAPTER I + +"TOM!" + +No answer. + +"TOM!" + +No answer. + +"What's gone with that boy, I wonder? You TOM!" + +No answer. 
+ +The old lady pulled her spectacles down and looked over them about the +room; then she put them up and looked out under them. She seldom or +never looked THROUGH them for so small a thing as a boy; they were her +state pair, the pride of her heart, and were built for "style," not +service--she could have seen through a pair of stove-lids just as well. +She looked perplexed for a moment, and then said, not fiercely, but +still loud enough for the furniture to hear: + +"Well, I lay if I get hold of you I'll--" + +She did not finish, for by this time she was bending down and punching +under the bed with the broom, and so she needed breath to punctuate the +punches with. She resurrected nothing but the cat. + +"I never did see the beat of that boy!" + +She went to the open door and stood in it and looked out among the +tomato vines and "jimpson" weeds that constituted the garden. No Tom. +So she lifted up her voice at an angle calculated for distance and +shouted: + +"Y-o-u-u TOM!" + +There was a slight noise behind her and she turned just in time to +seize a small boy by the slack of his roundabout and arrest his flight. + +"There! I might 'a' thought of that closet. What you been doing in +there?" + +"Nothing." + +"Nothing! Look at your hands. And look at your mouth. What IS that +truck?" + +"I don't know, aunt." + +"Well, I know. It's jam--that's what it is. Forty times I've said if +you didn't let that jam alone I'd skin you. Hand me that switch." + +The switch hovered in the air--the peril was desperate-- + +"My! Look behind you, aunt!" + +The old lady whirled round, and snatched her skirts out of danger. The +lad fled on the instant, scrambled up the high board-fence, and +disappeared over it. + +His aunt Polly stood surprised a moment, and then broke into a gentle +laugh. + +"Hang the boy, can't I never learn anything? Ain't he played me tricks +enough like that for me to be looking out for him by this time? But old +fools is the biggest fools there is. Can't learn an old dog new tricks, +as the saying is. But my goodness, he never plays them alike, two days, +and how is a body to know what's coming? He 'pears to know just how +long he can torment me before I get my dander up, and he knows if he +can make out to put me off for a minute or make me laugh, it's all down +again and I can't hit him a lick. I ain't doing my duty by that boy, +and that's the Lord's truth, goodness knows. Spare the rod and spile +the child, as the Good Book says. I'm a laying up sin and suffering for +us both, I know. He's full of the Old Scratch, but laws-a-me! he's my +own dead sister's boy, poor thing, and I ain't got the heart to lash +him, somehow. Every time I let him off, my conscience does hurt me so, +and every time I hit him my old heart most breaks. Well-a-well, man +that is born of woman is of few days and full of trouble, as the +Scripture says, and I reckon it's so. He'll play hookey this evening, * +and [* Southwestern for "afternoon"] I'll just be obleeged to make him +work, to-morrow, to punish him. It's mighty hard to make him work +Saturdays, when all the boys is having holiday, but he hates work more +than he hates anything else, and I've GOT to do some of my duty by him, +or I'll be the ruination of the child." + +Tom did play hookey, and he had a very good time. 
He got back home +barely in season to help Jim, the small colored boy, saw next-day's +wood and split the kindlings before supper--at least he was there in +time to tell his adventures to Jim while Jim did three-fourths of the +work. Tom's younger brother (or rather half-brother) Sid was already +through with his part of the work (picking up chips), for he was a +quiet boy, and had no adventurous, troublesome ways. + +While Tom was eating his supper, and stealing sugar as opportunity +offered, Aunt Polly asked him questions that were full of guile, and +very deep--for she wanted to trap him into damaging revealments. Like +many other simple-hearted souls, it was her pet vanity to believe she +was endowed with a talent for dark and mysterious diplomacy, and she +loved to contemplate her most transparent devices as marvels of low +cunning. Said she: + +"Tom, it was middling warm in school, warn't it?" + +"Yes'm." + +"Powerful warm, warn't it?" + +"Yes'm." + +"Didn't you want to go in a-swimming, Tom?" + +A bit of a scare shot through Tom--a touch of uncomfortable suspicion. +He searched Aunt Polly's face, but it told him nothing. So he said: + +"No'm--well, not very much." + +The old lady reached out her hand and felt Tom's shirt, and said: + +"But you ain't too warm now, though." And it flattered her to reflect +that she had discovered that the shirt was dry without anybody knowing +that that was what she had in her mind. But in spite of her, Tom knew +where the wind lay, now. So he forestalled what might be the next move: + +"Some of us pumped on our heads--mine's damp yet. See?" + +Aunt Polly was vexed to think she had overlooked that bit of +circumstantial evidence, and missed a trick. Then she had a new +inspiration: + +"Tom, you didn't have to undo your shirt collar where I sewed it, to +pump on your head, did you? Unbutton your jacket!" + +The trouble vanished out of Tom's face. He opened his jacket. His +shirt collar was securely sewed. + +"Bother! Well, go 'long with you. I'd made sure you'd played hookey +and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a +singed cat, as the saying is--better'n you look. THIS time." + +She was half sorry her sagacity had miscarried, and half glad that Tom +had stumbled into obedient conduct for once. + +But Sidney said: + +"Well, now, if I didn't think you sewed his collar with white thread, +but it's black." + +"Why, I did sew it with white! Tom!" + +But Tom did not wait for the rest. As he went out at the door he said: + +"Siddy, I'll lick you for that." + +In a safe place Tom examined two large needles which were thrust into +the lapels of his jacket, and had thread bound about them--one needle +carried white thread and the other black. He said: + +"She'd never noticed if it hadn't been for Sid. Confound it! sometimes +she sews it with white, and sometimes she sews it with black. I wish to +geeminy she'd stick to one or t'other--I can't keep the run of 'em. But +I bet you I'll lam Sid for that. I'll learn him!" + +He was not the Model Boy of the village. He knew the model boy very +well though--and loathed him. + +Within two minutes, or even less, he had forgotten all his troubles. +Not because his troubles were one whit less heavy and bitter to him +than a man's are to a man, but because a new and powerful interest bore +them down and drove them out of his mind for the time--just as men's +misfortunes are forgotten in the excitement of new enterprises. 
This +new interest was a valued novelty in whistling, which he had just +acquired from a negro, and he was suffering to practise it undisturbed. +It consisted in a peculiar bird-like turn, a sort of liquid warble, +produced by touching the tongue to the roof of the mouth at short +intervals in the midst of the music--the reader probably remembers how +to do it, if he has ever been a boy. Diligence and attention soon gave +him the knack of it, and he strode down the street with his mouth full +of harmony and his soul full of gratitude. He felt much as an +astronomer feels who has discovered a new planet--no doubt, as far as +strong, deep, unalloyed pleasure is concerned, the advantage was with +the boy, not the astronomer. + +The summer evenings were long. It was not dark, yet. Presently Tom +checked his whistle. A stranger was before him--a boy a shade larger +than himself. A new-comer of any age or either sex was an impressive +curiosity in the poor little shabby village of St. Petersburg. This boy +was well dressed, too--well dressed on a week-day. This was simply +astounding. His cap was a dainty thing, his close-buttoned blue cloth +roundabout was new and natty, and so were his pantaloons. He had shoes +on--and it was only Friday. He even wore a necktie, a bright bit of +ribbon. He had a citified air about him that ate into Tom's vitals. The +more Tom stared at the splendid marvel, the higher he turned up his +nose at his finery and the shabbier and shabbier his own outfit seemed +to him to grow. Neither boy spoke. If one moved, the other moved--but +only sidewise, in a circle; they kept face to face and eye to eye all +the time. Finally Tom said: + +"I can lick you!" + +"I'd like to see you try it." + +"Well, I can do it." + +"No you can't, either." + +"Yes I can." + +"No you can't." + +"I can." + +"You can't." + +"Can!" + +"Can't!" + +An uncomfortable pause. Then Tom said: + +"What's your name?" + +"'Tisn't any of your business, maybe." + +"Well I 'low I'll MAKE it my business." + +"Well why don't you?" + +"If you say much, I will." + +"Much--much--MUCH. There now." + +"Oh, you think you're mighty smart, DON'T you? I could lick you with +one hand tied behind me, if I wanted to." + +"Well why don't you DO it? You SAY you can do it." + +"Well I WILL, if you fool with me." + +"Oh yes--I've seen whole families in the same fix." + +"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" + +"You can lump that hat if you don't like it. I dare you to knock it +off--and anybody that'll take a dare will suck eggs." + +"You're a liar!" + +"You're another." + +"You're a fighting liar and dasn't take it up." + +"Aw--take a walk!" + +"Say--if you give me much more of your sass I'll take and bounce a +rock off'n your head." + +"Oh, of COURSE you will." + +"Well I WILL." + +"Well why don't you DO it then? What do you keep SAYING you will for? +Why don't you DO it? It's because you're afraid." + +"I AIN'T afraid." + +"You are." + +"I ain't." + +"You are." + +Another pause, and more eying and sidling around each other. Presently +they were shoulder to shoulder. Tom said: + +"Get away from here!" + +"Go away yourself!" + +"I won't." + +"I won't either." + +So they stood, each with a foot placed at an angle as a brace, and +both shoving with might and main, and glowering at each other with +hate. But neither could get an advantage. After struggling till both +were hot and flushed, each relaxed his strain with watchful caution, +and Tom said: + +"You're a coward and a pup. 
I'll tell my big brother on you, and he +can thrash you with his little finger, and I'll make him do it, too." + +"What do I care for your big brother? I've got a brother that's bigger +than he is--and what's more, he can throw him over that fence, too." +[Both brothers were imaginary.] + +"That's a lie." + +"YOUR saying so don't make it so." + +Tom drew a line in the dust with his big toe, and said: + +"I dare you to step over that, and I'll lick you till you can't stand +up. Anybody that'll take a dare will steal sheep." + +The new boy stepped over promptly, and said: + +"Now you said you'd do it, now let's see you do it." + +"Don't you crowd me now; you better look out." + +"Well, you SAID you'd do it--why don't you do it?" + +"By jingo! for two cents I WILL do it." + +The new boy took two broad coppers out of his pocket and held them out +with derision. Tom struck them to the ground. In an instant both boys +were rolling and tumbling in the dirt, gripped together like cats; and +for the space of a minute they tugged and tore at each other's hair and +clothes, punched and scratched each other's nose, and covered +themselves with dust and glory. Presently the confusion took form, and +through the fog of battle Tom appeared, seated astride the new boy, and +pounding him with his fists. "Holler 'nuff!" said he. + +The boy only struggled to free himself. He was crying--mainly from rage. + +"Holler 'nuff!"--and the pounding went on. + +At last the stranger got out a smothered "'Nuff!" and Tom let him up +and said: + +"Now that'll learn you. Better look out who you're fooling with next +time." + +The new boy went off brushing the dust from his clothes, sobbing, +snuffling, and occasionally looking back and shaking his head and +threatening what he would do to Tom the "next time he caught him out." +To which Tom responded with jeers, and started off in high feather, and +as soon as his back was turned the new boy snatched up a stone, threw +it and hit him between the shoulders and then turned tail and ran like +an antelope. Tom chased the traitor home, and thus found out where he +lived. He then held a position at the gate for some time, daring the +enemy to come outside, but the enemy only made faces at him through the +window and declined. At last the enemy's mother appeared, and called +Tom a bad, vicious, vulgar child, and ordered him away. So he went +away; but he said he "'lowed" to "lay" for that boy. + +He got home pretty late that night, and when he climbed cautiously in +at the window, he uncovered an ambuscade, in the person of his aunt; +and when she saw the state his clothes were in her resolution to turn +his Saturday holiday into captivity at hard labor became adamantine in +its firmness. 
diff --git a/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy new file mode 100644 index 00000000000..9c56d985888 Binary files /dev/null and b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy differ diff --git a/vendor/github.com/klauspost/compress/snappy/testdata/random b/vendor/github.com/klauspost/compress/snappy/testdata/random new file mode 100644 index 00000000000..d5e9606f033 Binary files /dev/null and b/vendor/github.com/klauspost/compress/snappy/testdata/random differ diff --git a/vendor/github.com/klauspost/compress/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/klauspost/compress/testdata/Mark.Twain-Tom.Sawyer.txt new file mode 100644 index 00000000000..565627a9253 --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/Mark.Twain-Tom.Sawyer.txt @@ -0,0 +1,8472 @@ +Produced by David Widger. The previous edition was updated by Jose +Menendez. + + + + + + THE ADVENTURES OF TOM SAWYER + BY + MARK TWAIN + (Samuel Langhorne Clemens) + + + + + P R E F A C E + +MOST of the adventures recorded in this book really occurred; one or +two were experiences of my own, the rest those of boys who were +schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but +not from an individual--he is a combination of the characteristics of +three boys whom I knew, and therefore belongs to the composite order of +architecture. + +The odd superstitions touched upon were all prevalent among children +and slaves in the West at the period of this story--that is to say, +thirty or forty years ago. + +Although my book is intended mainly for the entertainment of boys and +girls, I hope it will not be shunned by men and women on that account, +for part of my plan has been to try to pleasantly remind adults of what +they once were themselves, and of how they felt and thought and talked, +and what queer enterprises they sometimes engaged in. + + THE AUTHOR. + +HARTFORD, 1876. + + + + T O M S A W Y E R + + + +CHAPTER I + +"TOM!" + +No answer. + +"TOM!" + +No answer. + +"What's gone with that boy, I wonder? You TOM!" + +No answer. + +The old lady pulled her spectacles down and looked over them about the +room; then she put them up and looked out under them. She seldom or +never looked THROUGH them for so small a thing as a boy; they were her +state pair, the pride of her heart, and were built for "style," not +service--she could have seen through a pair of stove-lids just as well. +She looked perplexed for a moment, and then said, not fiercely, but +still loud enough for the furniture to hear: + +"Well, I lay if I get hold of you I'll--" + +She did not finish, for by this time she was bending down and punching +under the bed with the broom, and so she needed breath to punctuate the +punches with. She resurrected nothing but the cat. + +"I never did see the beat of that boy!" + +She went to the open door and stood in it and looked out among the +tomato vines and "jimpson" weeds that constituted the garden. No Tom. +So she lifted up her voice at an angle calculated for distance and +shouted: + +"Y-o-u-u TOM!" + +There was a slight noise behind her and she turned just in time to +seize a small boy by the slack of his roundabout and arrest his flight. + +"There! I might 'a' thought of that closet. What you been doing in +there?" + +"Nothing." + +"Nothing! Look at your hands. And look at your mouth. What IS that +truck?" 
+ +"I don't know, aunt." + +"Well, I know. It's jam--that's what it is. Forty times I've said if +you didn't let that jam alone I'd skin you. Hand me that switch." + +The switch hovered in the air--the peril was desperate-- + +"My! Look behind you, aunt!" + +The old lady whirled round, and snatched her skirts out of danger. The +lad fled on the instant, scrambled up the high board-fence, and +disappeared over it. + +His aunt Polly stood surprised a moment, and then broke into a gentle +laugh. + +"Hang the boy, can't I never learn anything? Ain't he played me tricks +enough like that for me to be looking out for him by this time? But old +fools is the biggest fools there is. Can't learn an old dog new tricks, +as the saying is. But my goodness, he never plays them alike, two days, +and how is a body to know what's coming? He 'pears to know just how +long he can torment me before I get my dander up, and he knows if he +can make out to put me off for a minute or make me laugh, it's all down +again and I can't hit him a lick. I ain't doing my duty by that boy, +and that's the Lord's truth, goodness knows. Spare the rod and spile +the child, as the Good Book says. I'm a laying up sin and suffering for +us both, I know. He's full of the Old Scratch, but laws-a-me! he's my +own dead sister's boy, poor thing, and I ain't got the heart to lash +him, somehow. Every time I let him off, my conscience does hurt me so, +and every time I hit him my old heart most breaks. Well-a-well, man +that is born of woman is of few days and full of trouble, as the +Scripture says, and I reckon it's so. He'll play hookey this evening, * +and [* Southwestern for "afternoon"] I'll just be obleeged to make him +work, to-morrow, to punish him. It's mighty hard to make him work +Saturdays, when all the boys is having holiday, but he hates work more +than he hates anything else, and I've GOT to do some of my duty by him, +or I'll be the ruination of the child." + +Tom did play hookey, and he had a very good time. He got back home +barely in season to help Jim, the small colored boy, saw next-day's +wood and split the kindlings before supper--at least he was there in +time to tell his adventures to Jim while Jim did three-fourths of the +work. Tom's younger brother (or rather half-brother) Sid was already +through with his part of the work (picking up chips), for he was a +quiet boy, and had no adventurous, troublesome ways. + +While Tom was eating his supper, and stealing sugar as opportunity +offered, Aunt Polly asked him questions that were full of guile, and +very deep--for she wanted to trap him into damaging revealments. Like +many other simple-hearted souls, it was her pet vanity to believe she +was endowed with a talent for dark and mysterious diplomacy, and she +loved to contemplate her most transparent devices as marvels of low +cunning. Said she: + +"Tom, it was middling warm in school, warn't it?" + +"Yes'm." + +"Powerful warm, warn't it?" + +"Yes'm." + +"Didn't you want to go in a-swimming, Tom?" + +A bit of a scare shot through Tom--a touch of uncomfortable suspicion. +He searched Aunt Polly's face, but it told him nothing. So he said: + +"No'm--well, not very much." + +The old lady reached out her hand and felt Tom's shirt, and said: + +"But you ain't too warm now, though." And it flattered her to reflect +that she had discovered that the shirt was dry without anybody knowing +that that was what she had in her mind. But in spite of her, Tom knew +where the wind lay, now. 
So he forestalled what might be the next move: + +"Some of us pumped on our heads--mine's damp yet. See?" + +Aunt Polly was vexed to think she had overlooked that bit of +circumstantial evidence, and missed a trick. Then she had a new +inspiration: + +"Tom, you didn't have to undo your shirt collar where I sewed it, to +pump on your head, did you? Unbutton your jacket!" + +The trouble vanished out of Tom's face. He opened his jacket. His +shirt collar was securely sewed. + +"Bother! Well, go 'long with you. I'd made sure you'd played hookey +and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a +singed cat, as the saying is--better'n you look. THIS time." + +She was half sorry her sagacity had miscarried, and half glad that Tom +had stumbled into obedient conduct for once. + +But Sidney said: + +"Well, now, if I didn't think you sewed his collar with white thread, +but it's black." + +"Why, I did sew it with white! Tom!" + +But Tom did not wait for the rest. As he went out at the door he said: + +"Siddy, I'll lick you for that." + +In a safe place Tom examined two large needles which were thrust into +the lapels of his jacket, and had thread bound about them--one needle +carried white thread and the other black. He said: + +"She'd never noticed if it hadn't been for Sid. Confound it! sometimes +she sews it with white, and sometimes she sews it with black. I wish to +geeminy she'd stick to one or t'other--I can't keep the run of 'em. But +I bet you I'll lam Sid for that. I'll learn him!" + +He was not the Model Boy of the village. He knew the model boy very +well though--and loathed him. + +Within two minutes, or even less, he had forgotten all his troubles. +Not because his troubles were one whit less heavy and bitter to him +than a man's are to a man, but because a new and powerful interest bore +them down and drove them out of his mind for the time--just as men's +misfortunes are forgotten in the excitement of new enterprises. This +new interest was a valued novelty in whistling, which he had just +acquired from a negro, and he was suffering to practise it undisturbed. +It consisted in a peculiar bird-like turn, a sort of liquid warble, +produced by touching the tongue to the roof of the mouth at short +intervals in the midst of the music--the reader probably remembers how +to do it, if he has ever been a boy. Diligence and attention soon gave +him the knack of it, and he strode down the street with his mouth full +of harmony and his soul full of gratitude. He felt much as an +astronomer feels who has discovered a new planet--no doubt, as far as +strong, deep, unalloyed pleasure is concerned, the advantage was with +the boy, not the astronomer. + +The summer evenings were long. It was not dark, yet. Presently Tom +checked his whistle. A stranger was before him--a boy a shade larger +than himself. A new-comer of any age or either sex was an impressive +curiosity in the poor little shabby village of St. Petersburg. This boy +was well dressed, too--well dressed on a week-day. This was simply +astounding. His cap was a dainty thing, his close-buttoned blue cloth +roundabout was new and natty, and so were his pantaloons. He had shoes +on--and it was only Friday. He even wore a necktie, a bright bit of +ribbon. He had a citified air about him that ate into Tom's vitals. The +more Tom stared at the splendid marvel, the higher he turned up his +nose at his finery and the shabbier and shabbier his own outfit seemed +to him to grow. Neither boy spoke. 
If one moved, the other moved--but +only sidewise, in a circle; they kept face to face and eye to eye all +the time. Finally Tom said: + +"I can lick you!" + +"I'd like to see you try it." + +"Well, I can do it." + +"No you can't, either." + +"Yes I can." + +"No you can't." + +"I can." + +"You can't." + +"Can!" + +"Can't!" + +An uncomfortable pause. Then Tom said: + +"What's your name?" + +"'Tisn't any of your business, maybe." + +"Well I 'low I'll MAKE it my business." + +"Well why don't you?" + +"If you say much, I will." + +"Much--much--MUCH. There now." + +"Oh, you think you're mighty smart, DON'T you? I could lick you with +one hand tied behind me, if I wanted to." + +"Well why don't you DO it? You SAY you can do it." + +"Well I WILL, if you fool with me." + +"Oh yes--I've seen whole families in the same fix." + +"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" + +"You can lump that hat if you don't like it. I dare you to knock it +off--and anybody that'll take a dare will suck eggs." + +"You're a liar!" + +"You're another." + +"You're a fighting liar and dasn't take it up." + +"Aw--take a walk!" + +"Say--if you give me much more of your sass I'll take and bounce a +rock off'n your head." + +"Oh, of COURSE you will." + +"Well I WILL." + +"Well why don't you DO it then? What do you keep SAYING you will for? +Why don't you DO it? It's because you're afraid." + +"I AIN'T afraid." + +"You are." + +"I ain't." + +"You are." + +Another pause, and more eying and sidling around each other. Presently +they were shoulder to shoulder. Tom said: + +"Get away from here!" + +"Go away yourself!" + +"I won't." + +"I won't either." + +So they stood, each with a foot placed at an angle as a brace, and +both shoving with might and main, and glowering at each other with +hate. But neither could get an advantage. After struggling till both +were hot and flushed, each relaxed his strain with watchful caution, +and Tom said: + +"You're a coward and a pup. I'll tell my big brother on you, and he +can thrash you with his little finger, and I'll make him do it, too." + +"What do I care for your big brother? I've got a brother that's bigger +than he is--and what's more, he can throw him over that fence, too." +[Both brothers were imaginary.] + +"That's a lie." + +"YOUR saying so don't make it so." + +Tom drew a line in the dust with his big toe, and said: + +"I dare you to step over that, and I'll lick you till you can't stand +up. Anybody that'll take a dare will steal sheep." + +The new boy stepped over promptly, and said: + +"Now you said you'd do it, now let's see you do it." + +"Don't you crowd me now; you better look out." + +"Well, you SAID you'd do it--why don't you do it?" + +"By jingo! for two cents I WILL do it." + +The new boy took two broad coppers out of his pocket and held them out +with derision. Tom struck them to the ground. In an instant both boys +were rolling and tumbling in the dirt, gripped together like cats; and +for the space of a minute they tugged and tore at each other's hair and +clothes, punched and scratched each other's nose, and covered +themselves with dust and glory. Presently the confusion took form, and +through the fog of battle Tom appeared, seated astride the new boy, and +pounding him with his fists. "Holler 'nuff!" said he. + +The boy only struggled to free himself. He was crying--mainly from rage. + +"Holler 'nuff!"--and the pounding went on. + +At last the stranger got out a smothered "'Nuff!" and Tom let him up +and said: + +"Now that'll learn you. 
Better look out who you're fooling with next +time." + +The new boy went off brushing the dust from his clothes, sobbing, +snuffling, and occasionally looking back and shaking his head and +threatening what he would do to Tom the "next time he caught him out." +To which Tom responded with jeers, and started off in high feather, and +as soon as his back was turned the new boy snatched up a stone, threw +it and hit him between the shoulders and then turned tail and ran like +an antelope. Tom chased the traitor home, and thus found out where he +lived. He then held a position at the gate for some time, daring the +enemy to come outside, but the enemy only made faces at him through the +window and declined. At last the enemy's mother appeared, and called +Tom a bad, vicious, vulgar child, and ordered him away. So he went +away; but he said he "'lowed" to "lay" for that boy. + +He got home pretty late that night, and when he climbed cautiously in +at the window, he uncovered an ambuscade, in the person of his aunt; +and when she saw the state his clothes were in her resolution to turn +his Saturday holiday into captivity at hard labor became adamantine in +its firmness. + + + +CHAPTER II + +SATURDAY morning was come, and all the summer world was bright and +fresh, and brimming with life. There was a song in every heart; and if +the heart was young the music issued at the lips. There was cheer in +every face and a spring in every step. The locust-trees were in bloom +and the fragrance of the blossoms filled the air. Cardiff Hill, beyond +the village and above it, was green with vegetation and it lay just far +enough away to seem a Delectable Land, dreamy, reposeful, and inviting. + +Tom appeared on the sidewalk with a bucket of whitewash and a +long-handled brush. He surveyed the fence, and all gladness left him and +a deep melancholy settled down upon his spirit. Thirty yards of board +fence nine feet high. Life to him seemed hollow, and existence but a +burden. Sighing, he dipped his brush and passed it along the topmost +plank; repeated the operation; did it again; compared the insignificant +whitewashed streak with the far-reaching continent of unwhitewashed +fence, and sat down on a tree-box discouraged. Jim came skipping out at +the gate with a tin pail, and singing Buffalo Gals. Bringing water from +the town pump had always been hateful work in Tom's eyes, before, but +now it did not strike him so. He remembered that there was company at +the pump. White, mulatto, and negro boys and girls were always there +waiting their turns, resting, trading playthings, quarrelling, +fighting, skylarking. And he remembered that although the pump was only +a hundred and fifty yards off, Jim never got back with a bucket of +water under an hour--and even then somebody generally had to go after +him. Tom said: + +"Say, Jim, I'll fetch the water if you'll whitewash some." + +Jim shook his head and said: + +"Can't, Mars Tom. Ole missis, she tole me I got to go an' git dis +water an' not stop foolin' roun' wid anybody. She say she spec' Mars +Tom gwine to ax me to whitewash, an' so she tole me go 'long an' 'tend +to my own business--she 'lowed SHE'D 'tend to de whitewashin'." + +"Oh, never you mind what she said, Jim. That's the way she always +talks. Gimme the bucket--I won't be gone only a a minute. SHE won't +ever know." + +"Oh, I dasn't, Mars Tom. Ole missis she'd take an' tar de head off'n +me. 'Deed she would." + +"SHE! 
She never licks anybody--whacks 'em over the head with her +thimble--and who cares for that, I'd like to know. She talks awful, but +talk don't hurt--anyways it don't if she don't cry. Jim, I'll give you +a marvel. I'll give you a white alley!" + +Jim began to waver. + +"White alley, Jim! And it's a bully taw." + +"My! Dat's a mighty gay marvel, I tell you! But Mars Tom I's powerful +'fraid ole missis--" + +"And besides, if you will I'll show you my sore toe." + +Jim was only human--this attraction was too much for him. He put down +his pail, took the white alley, and bent over the toe with absorbing +interest while the bandage was being unwound. In another moment he was +flying down the street with his pail and a tingling rear, Tom was +whitewashing with vigor, and Aunt Polly was retiring from the field +with a slipper in her hand and triumph in her eye. + +But Tom's energy did not last. He began to think of the fun he had +planned for this day, and his sorrows multiplied. Soon the free boys +would come tripping along on all sorts of delicious expeditions, and +they would make a world of fun of him for having to work--the very +thought of it burnt him like fire. He got out his worldly wealth and +examined it--bits of toys, marbles, and trash; enough to buy an +exchange of WORK, maybe, but not half enough to buy so much as half an +hour of pure freedom. So he returned his straitened means to his +pocket, and gave up the idea of trying to buy the boys. At this dark +and hopeless moment an inspiration burst upon him! Nothing less than a +great, magnificent inspiration. + +He took up his brush and went tranquilly to work. Ben Rogers hove in +sight presently--the very boy, of all boys, whose ridicule he had been +dreading. Ben's gait was the hop-skip-and-jump--proof enough that his +heart was light and his anticipations high. He was eating an apple, and +giving a long, melodious whoop, at intervals, followed by a deep-toned +ding-dong-dong, ding-dong-dong, for he was personating a steamboat. As +he drew near, he slackened speed, took the middle of the street, leaned +far over to starboard and rounded to ponderously and with laborious +pomp and circumstance--for he was personating the Big Missouri, and +considered himself to be drawing nine feet of water. He was boat and +captain and engine-bells combined, so he had to imagine himself +standing on his own hurricane-deck giving the orders and executing them: + +"Stop her, sir! Ting-a-ling-ling!" The headway ran almost out, and he +drew up slowly toward the sidewalk. + +"Ship up to back! Ting-a-ling-ling!" His arms straightened and +stiffened down his sides. + +"Set her back on the stabboard! Ting-a-ling-ling! Chow! ch-chow-wow! +Chow!" His right hand, meantime, describing stately circles--for it was +representing a forty-foot wheel. + +"Let her go back on the labboard! Ting-a-lingling! Chow-ch-chow-chow!" +The left hand began to describe circles. + +"Stop the stabboard! Ting-a-ling-ling! Stop the labboard! Come ahead +on the stabboard! Stop her! Let your outside turn over slow! +Ting-a-ling-ling! Chow-ow-ow! Get out that head-line! LIVELY now! +Come--out with your spring-line--what're you about there! Take a turn +round that stump with the bight of it! Stand by that stage, now--let her +go! Done with the engines, sir! Ting-a-ling-ling! SH'T! S'H'T! SH'T!" +(trying the gauge-cocks). + +Tom went on whitewashing--paid no attention to the steamboat. Ben +stared a moment and then said: "Hi-YI! YOU'RE up a stump, ain't you!" + +No answer. 
Tom surveyed his last touch with the eye of an artist, then +he gave his brush another gentle sweep and surveyed the result, as +before. Ben ranged up alongside of him. Tom's mouth watered for the +apple, but he stuck to his work. Ben said: + +"Hello, old chap, you got to work, hey?" + +Tom wheeled suddenly and said: + +"Why, it's you, Ben! I warn't noticing." + +"Say--I'm going in a-swimming, I am. Don't you wish you could? But of +course you'd druther WORK--wouldn't you? Course you would!" + +Tom contemplated the boy a bit, and said: + +"What do you call work?" + +"Why, ain't THAT work?" + +Tom resumed his whitewashing, and answered carelessly: + +"Well, maybe it is, and maybe it ain't. All I know, is, it suits Tom +Sawyer." + +"Oh come, now, you don't mean to let on that you LIKE it?" + +The brush continued to move. + +"Like it? Well, I don't see why I oughtn't to like it. Does a boy get +a chance to whitewash a fence every day?" + +That put the thing in a new light. Ben stopped nibbling his apple. Tom +swept his brush daintily back and forth--stepped back to note the +effect--added a touch here and there--criticised the effect again--Ben +watching every move and getting more and more interested, more and more +absorbed. Presently he said: + +"Say, Tom, let ME whitewash a little." + +Tom considered, was about to consent; but he altered his mind: + +"No--no--I reckon it wouldn't hardly do, Ben. You see, Aunt Polly's +awful particular about this fence--right here on the street, you know +--but if it was the back fence I wouldn't mind and SHE wouldn't. Yes, +she's awful particular about this fence; it's got to be done very +careful; I reckon there ain't one boy in a thousand, maybe two +thousand, that can do it the way it's got to be done." + +"No--is that so? Oh come, now--lemme just try. Only just a little--I'd +let YOU, if you was me, Tom." + +"Ben, I'd like to, honest injun; but Aunt Polly--well, Jim wanted to +do it, but she wouldn't let him; Sid wanted to do it, and she wouldn't +let Sid. Now don't you see how I'm fixed? If you was to tackle this +fence and anything was to happen to it--" + +"Oh, shucks, I'll be just as careful. Now lemme try. Say--I'll give +you the core of my apple." + +"Well, here--No, Ben, now don't. I'm afeard--" + +"I'll give you ALL of it!" + +Tom gave up the brush with reluctance in his face, but alacrity in his +heart. And while the late steamer Big Missouri worked and sweated in +the sun, the retired artist sat on a barrel in the shade close by, +dangled his legs, munched his apple, and planned the slaughter of more +innocents. There was no lack of material; boys happened along every +little while; they came to jeer, but remained to whitewash. By the time +Ben was fagged out, Tom had traded the next chance to Billy Fisher for +a kite, in good repair; and when he played out, Johnny Miller bought in +for a dead rat and a string to swing it with--and so on, and so on, +hour after hour. And when the middle of the afternoon came, from being +a poor poverty-stricken boy in the morning, Tom was literally rolling +in wealth. 
He had besides the things before mentioned, twelve marbles, +part of a jews-harp, a piece of blue bottle-glass to look through, a +spool cannon, a key that wouldn't unlock anything, a fragment of chalk, +a glass stopper of a decanter, a tin soldier, a couple of tadpoles, six +fire-crackers, a kitten with only one eye, a brass doorknob, a +dog-collar--but no dog--the handle of a knife, four pieces of +orange-peel, and a dilapidated old window sash. + +He had had a nice, good, idle time all the while--plenty of company +--and the fence had three coats of whitewash on it! If he hadn't run out +of whitewash he would have bankrupted every boy in the village. + +Tom said to himself that it was not such a hollow world, after all. He +had discovered a great law of human action, without knowing it--namely, +that in order to make a man or a boy covet a thing, it is only +necessary to make the thing difficult to attain. If he had been a great +and wise philosopher, like the writer of this book, he would now have +comprehended that Work consists of whatever a body is OBLIGED to do, +and that Play consists of whatever a body is not obliged to do. And +this would help him to understand why constructing artificial flowers +or performing on a tread-mill is work, while rolling ten-pins or +climbing Mont Blanc is only amusement. There are wealthy gentlemen in +England who drive four-horse passenger-coaches twenty or thirty miles +on a daily line, in the summer, because the privilege costs them +considerable money; but if they were offered wages for the service, +that would turn it into work and then they would resign. + +The boy mused awhile over the substantial change which had taken place +in his worldly circumstances, and then wended toward headquarters to +report. + + + +CHAPTER III + +TOM presented himself before Aunt Polly, who was sitting by an open +window in a pleasant rearward apartment, which was bedroom, +breakfast-room, dining-room, and library, combined. The balmy summer +air, the restful quiet, the odor of the flowers, and the drowsing murmur +of the bees had had their effect, and she was nodding over her knitting +--for she had no company but the cat, and it was asleep in her lap. Her +spectacles were propped up on her gray head for safety. She had thought +that of course Tom had deserted long ago, and she wondered at seeing him +place himself in her power again in this intrepid way. He said: "Mayn't +I go and play now, aunt?" + +"What, a'ready? How much have you done?" + +"It's all done, aunt." + +"Tom, don't lie to me--I can't bear it." + +"I ain't, aunt; it IS all done." + +Aunt Polly placed small trust in such evidence. She went out to see +for herself; and she would have been content to find twenty per cent. +of Tom's statement true. When she found the entire fence whitewashed, +and not only whitewashed but elaborately coated and recoated, and even +a streak added to the ground, her astonishment was almost unspeakable. +She said: + +"Well, I never! There's no getting round it, you can work when you're +a mind to, Tom." And then she diluted the compliment by adding, "But +it's powerful seldom you're a mind to, I'm bound to say. Well, go 'long +and play; but mind you get back some time in a week, or I'll tan you." 
+ +She was so overcome by the splendor of his achievement that she took +him into the closet and selected a choice apple and delivered it to +him, along with an improving lecture upon the added value and flavor a +treat took to itself when it came without sin through virtuous effort. +And while she closed with a happy Scriptural flourish, he "hooked" a +doughnut. + +Then he skipped out, and saw Sid just starting up the outside stairway +that led to the back rooms on the second floor. Clods were handy and +the air was full of them in a twinkling. They raged around Sid like a +hail-storm; and before Aunt Polly could collect her surprised faculties +and sally to the rescue, six or seven clods had taken personal effect, +and Tom was over the fence and gone. There was a gate, but as a general +thing he was too crowded for time to make use of it. His soul was at +peace, now that he had settled with Sid for calling attention to his +black thread and getting him into trouble. + +Tom skirted the block, and came round into a muddy alley that led by +the back of his aunt's cow-stable. He presently got safely beyond the +reach of capture and punishment, and hastened toward the public square +of the village, where two "military" companies of boys had met for +conflict, according to previous appointment. Tom was General of one of +these armies, Joe Harper (a bosom friend) General of the other. These +two great commanders did not condescend to fight in person--that being +better suited to the still smaller fry--but sat together on an eminence +and conducted the field operations by orders delivered through +aides-de-camp. Tom's army won a great victory, after a long and +hard-fought battle. Then the dead were counted, prisoners exchanged, +the terms of the next disagreement agreed upon, and the day for the +necessary battle appointed; after which the armies fell into line and +marched away, and Tom turned homeward alone. + +As he was passing by the house where Jeff Thatcher lived, he saw a new +girl in the garden--a lovely little blue-eyed creature with yellow hair +plaited into two long-tails, white summer frock and embroidered +pantalettes. The fresh-crowned hero fell without firing a shot. A +certain Amy Lawrence vanished out of his heart and left not even a +memory of herself behind. He had thought he loved her to distraction; +he had regarded his passion as adoration; and behold it was only a poor +little evanescent partiality. He had been months winning her; she had +confessed hardly a week ago; he had been the happiest and the proudest +boy in the world only seven short days, and here in one instant of time +she had gone out of his heart like a casual stranger whose visit is +done. + +He worshipped this new angel with furtive eye, till he saw that she +had discovered him; then he pretended he did not know she was present, +and began to "show off" in all sorts of absurd boyish ways, in order to +win her admiration. He kept up this grotesque foolishness for some +time; but by-and-by, while he was in the midst of some dangerous +gymnastic performances, he glanced aside and saw that the little girl +was wending her way toward the house. Tom came up to the fence and +leaned on it, grieving, and hoping she would tarry yet awhile longer. +She halted a moment on the steps and then moved toward the door. Tom +heaved a great sigh as she put her foot on the threshold. But his face +lit up, right away, for she tossed a pansy over the fence a moment +before she disappeared. 
+ +The boy ran around and stopped within a foot or two of the flower, and +then shaded his eyes with his hand and began to look down street as if +he had discovered something of interest going on in that direction. +Presently he picked up a straw and began trying to balance it on his +nose, with his head tilted far back; and as he moved from side to side, +in his efforts, he edged nearer and nearer toward the pansy; finally +his bare foot rested upon it, his pliant toes closed upon it, and he +hopped away with the treasure and disappeared round the corner. But +only for a minute--only while he could button the flower inside his +jacket, next his heart--or next his stomach, possibly, for he was not +much posted in anatomy, and not hypercritical, anyway. + +He returned, now, and hung about the fence till nightfall, "showing +off," as before; but the girl never exhibited herself again, though Tom +comforted himself a little with the hope that she had been near some +window, meantime, and been aware of his attentions. Finally he strode +home reluctantly, with his poor head full of visions. + +All through supper his spirits were so high that his aunt wondered +"what had got into the child." He took a good scolding about clodding +Sid, and did not seem to mind it in the least. He tried to steal sugar +under his aunt's very nose, and got his knuckles rapped for it. He said: + +"Aunt, you don't whack Sid when he takes it." + +"Well, Sid don't torment a body the way you do. You'd be always into +that sugar if I warn't watching you." + +Presently she stepped into the kitchen, and Sid, happy in his +immunity, reached for the sugar-bowl--a sort of glorying over Tom which +was wellnigh unbearable. But Sid's fingers slipped and the bowl dropped +and broke. Tom was in ecstasies. In such ecstasies that he even +controlled his tongue and was silent. He said to himself that he would +not speak a word, even when his aunt came in, but would sit perfectly +still till she asked who did the mischief; and then he would tell, and +there would be nothing so good in the world as to see that pet model +"catch it." He was so brimful of exultation that he could hardly hold +himself when the old lady came back and stood above the wreck +discharging lightnings of wrath from over her spectacles. He said to +himself, "Now it's coming!" And the next instant he was sprawling on +the floor! The potent palm was uplifted to strike again when Tom cried +out: + +"Hold on, now, what 'er you belting ME for?--Sid broke it!" + +Aunt Polly paused, perplexed, and Tom looked for healing pity. But +when she got her tongue again, she only said: + +"Umf! Well, you didn't get a lick amiss, I reckon. You been into some +other audacious mischief when I wasn't around, like enough." + +Then her conscience reproached her, and she yearned to say something +kind and loving; but she judged that this would be construed into a +confession that she had been in the wrong, and discipline forbade that. +So she kept silence, and went about her affairs with a troubled heart. +Tom sulked in a corner and exalted his woes. He knew that in her heart +his aunt was on her knees to him, and he was morosely gratified by the +consciousness of it. He would hang out no signals, he would take notice +of none. He knew that a yearning glance fell upon him, now and then, +through a film of tears, but he refused recognition of it. 
He pictured +himself lying sick unto death and his aunt bending over him beseeching +one little forgiving word, but he would turn his face to the wall, and +die with that word unsaid. Ah, how would she feel then? And he pictured +himself brought home from the river, dead, with his curls all wet, and +his sore heart at rest. How she would throw herself upon him, and how +her tears would fall like rain, and her lips pray God to give her back +her boy and she would never, never abuse him any more! But he would lie +there cold and white and make no sign--a poor little sufferer, whose +griefs were at an end. He so worked upon his feelings with the pathos +of these dreams, that he had to keep swallowing, he was so like to +choke; and his eyes swam in a blur of water, which overflowed when he +winked, and ran down and trickled from the end of his nose. And such a +luxury to him was this petting of his sorrows, that he could not bear +to have any worldly cheeriness or any grating delight intrude upon it; +it was too sacred for such contact; and so, presently, when his cousin +Mary danced in, all alive with the joy of seeing home again after an +age-long visit of one week to the country, he got up and moved in +clouds and darkness out at one door as she brought song and sunshine in +at the other. + +He wandered far from the accustomed haunts of boys, and sought +desolate places that were in harmony with his spirit. A log raft in the +river invited him, and he seated himself on its outer edge and +contemplated the dreary vastness of the stream, wishing, the while, +that he could only be drowned, all at once and unconsciously, without +undergoing the uncomfortable routine devised by nature. Then he thought +of his flower. He got it out, rumpled and wilted, and it mightily +increased his dismal felicity. He wondered if she would pity him if she +knew? Would she cry, and wish that she had a right to put her arms +around his neck and comfort him? Or would she turn coldly away like all +the hollow world? This picture brought such an agony of pleasurable +suffering that he worked it over and over again in his mind and set it +up in new and varied lights, till he wore it threadbare. At last he +rose up sighing and departed in the darkness. + +About half-past nine or ten o'clock he came along the deserted street +to where the Adored Unknown lived; he paused a moment; no sound fell +upon his listening ear; a candle was casting a dull glow upon the +curtain of a second-story window. Was the sacred presence there? He +climbed the fence, threaded his stealthy way through the plants, till +he stood under that window; he looked up at it long, and with emotion; +then he laid him down on the ground under it, disposing himself upon +his back, with his hands clasped upon his breast and holding his poor +wilted flower. And thus he would die--out in the cold world, with no +shelter over his homeless head, no friendly hand to wipe the +death-damps from his brow, no loving face to bend pityingly over him +when the great agony came. And thus SHE would see him when she looked +out upon the glad morning, and oh! would she drop one little tear upon +his poor, lifeless form, would she heave one little sigh to see a bright +young life so rudely blighted, so untimely cut down? + +The window went up, a maid-servant's discordant voice profaned the +holy calm, and a deluge of water drenched the prone martyr's remains! + +The strangling hero sprang up with a relieving snort. 
There was a whiz +as of a missile in the air, mingled with the murmur of a curse, a sound +as of shivering glass followed, and a small, vague form went over the +fence and shot away in the gloom. + +Not long after, as Tom, all undressed for bed, was surveying his +drenched garments by the light of a tallow dip, Sid woke up; but if he +had any dim idea of making any "references to allusions," he thought +better of it and held his peace, for there was danger in Tom's eye. + +Tom turned in without the added vexation of prayers, and Sid made +mental note of the omission. + + + +CHAPTER IV + +THE sun rose upon a tranquil world, and beamed down upon the peaceful +village like a benediction. Breakfast over, Aunt Polly had family +worship: it began with a prayer built from the ground up of solid +courses of Scriptural quotations, welded together with a thin mortar of +originality; and from the summit of this she delivered a grim chapter +of the Mosaic Law, as from Sinai. + +Then Tom girded up his loins, so to speak, and went to work to "get +his verses." Sid had learned his lesson days before. Tom bent all his +energies to the memorizing of five verses, and he chose part of the +Sermon on the Mount, because he could find no verses that were shorter. +At the end of half an hour Tom had a vague general idea of his lesson, +but no more, for his mind was traversing the whole field of human +thought, and his hands were busy with distracting recreations. Mary +took his book to hear him recite, and he tried to find his way through +the fog: + +"Blessed are the--a--a--" + +"Poor"-- + +"Yes--poor; blessed are the poor--a--a--" + +"In spirit--" + +"In spirit; blessed are the poor in spirit, for they--they--" + +"THEIRS--" + +"For THEIRS. Blessed are the poor in spirit, for theirs is the kingdom +of heaven. Blessed are they that mourn, for they--they--" + +"Sh--" + +"For they--a--" + +"S, H, A--" + +"For they S, H--Oh, I don't know what it is!" + +"SHALL!" + +"Oh, SHALL! for they shall--for they shall--a--a--shall mourn--a--a-- +blessed are they that shall--they that--a--they that shall mourn, for +they shall--a--shall WHAT? Why don't you tell me, Mary?--what do you +want to be so mean for?" + +"Oh, Tom, you poor thick-headed thing, I'm not teasing you. I wouldn't +do that. You must go and learn it again. Don't you be discouraged, Tom, +you'll manage it--and if you do, I'll give you something ever so nice. +There, now, that's a good boy." + +"All right! What is it, Mary, tell me what it is." + +"Never you mind, Tom. You know if I say it's nice, it is nice." + +"You bet you that's so, Mary. All right, I'll tackle it again." + +And he did "tackle it again"--and under the double pressure of +curiosity and prospective gain he did it with such spirit that he +accomplished a shining success. Mary gave him a brand-new "Barlow" +knife worth twelve and a half cents; and the convulsion of delight that +swept his system shook him to his foundations. True, the knife would +not cut anything, but it was a "sure-enough" Barlow, and there was +inconceivable grandeur in that--though where the Western boys ever got +the idea that such a weapon could possibly be counterfeited to its +injury is an imposing mystery and will always remain so, perhaps. Tom +contrived to scarify the cupboard with it, and was arranging to begin +on the bureau, when he was called off to dress for Sunday-school. 
+ +Mary gave him a tin basin of water and a piece of soap, and he went +outside the door and set the basin on a little bench there; then he +dipped the soap in the water and laid it down; turned up his sleeves; +poured out the water on the ground, gently, and then entered the +kitchen and began to wipe his face diligently on the towel behind the +door. But Mary removed the towel and said: + +"Now ain't you ashamed, Tom. You mustn't be so bad. Water won't hurt +you." + +Tom was a trifle disconcerted. The basin was refilled, and this time +he stood over it a little while, gathering resolution; took in a big +breath and began. When he entered the kitchen presently, with both eyes +shut and groping for the towel with his hands, an honorable testimony +of suds and water was dripping from his face. But when he emerged from +the towel, he was not yet satisfactory, for the clean territory stopped +short at his chin and his jaws, like a mask; below and beyond this line +there was a dark expanse of unirrigated soil that spread downward in +front and backward around his neck. Mary took him in hand, and when she +was done with him he was a man and a brother, without distinction of +color, and his saturated hair was neatly brushed, and its short curls +wrought into a dainty and symmetrical general effect. [He privately +smoothed out the curls, with labor and difficulty, and plastered his +hair close down to his head; for he held curls to be effeminate, and +his own filled his life with bitterness.] Then Mary got out a suit of +his clothing that had been used only on Sundays during two years--they +were simply called his "other clothes"--and so by that we know the +size of his wardrobe. The girl "put him to rights" after he had dressed +himself; she buttoned his neat roundabout up to his chin, turned his +vast shirt collar down over his shoulders, brushed him off and crowned +him with his speckled straw hat. He now looked exceedingly improved and +uncomfortable. He was fully as uncomfortable as he looked; for there +was a restraint about whole clothes and cleanliness that galled him. He +hoped that Mary would forget his shoes, but the hope was blighted; she +coated them thoroughly with tallow, as was the custom, and brought them +out. He lost his temper and said he was always being made to do +everything he didn't want to do. But Mary said, persuasively: + +"Please, Tom--that's a good boy." + +So he got into the shoes snarling. Mary was soon ready, and the three +children set out for Sunday-school--a place that Tom hated with his +whole heart; but Sid and Mary were fond of it. + +Sabbath-school hours were from nine to half-past ten; and then church +service. Two of the children always remained for the sermon +voluntarily, and the other always remained too--for stronger reasons. +The church's high-backed, uncushioned pews would seat about three +hundred persons; the edifice was but a small, plain affair, with a sort +of pine board tree-box on top of it for a steeple. At the door Tom +dropped back a step and accosted a Sunday-dressed comrade: + +"Say, Billy, got a yaller ticket?" + +"Yes." + +"What'll you take for her?" + +"What'll you give?" + +"Piece of lickrish and a fish-hook." + +"Less see 'em." + +Tom exhibited. They were satisfactory, and the property changed hands. +Then Tom traded a couple of white alleys for three red tickets, and +some small trifle or other for a couple of blue ones. 
He waylaid other +boys as they came, and went on buying tickets of various colors ten or +fifteen minutes longer. He entered the church, now, with a swarm of +clean and noisy boys and girls, proceeded to his seat and started a +quarrel with the first boy that came handy. The teacher, a grave, +elderly man, interfered; then turned his back a moment and Tom pulled a +boy's hair in the next bench, and was absorbed in his book when the boy +turned around; stuck a pin in another boy, presently, in order to hear +him say "Ouch!" and got a new reprimand from his teacher. Tom's whole +class were of a pattern--restless, noisy, and troublesome. When they +came to recite their lessons, not one of them knew his verses +perfectly, but had to be prompted all along. However, they worried +through, and each got his reward--in small blue tickets, each with a +passage of Scripture on it; each blue ticket was pay for two verses of +the recitation. Ten blue tickets equalled a red one, and could be +exchanged for it; ten red tickets equalled a yellow one; for ten yellow +tickets the superintendent gave a very plainly bound Bible (worth forty +cents in those easy times) to the pupil. How many of my readers would +have the industry and application to memorize two thousand verses, even +for a Dore Bible? And yet Mary had acquired two Bibles in this way--it +was the patient work of two years--and a boy of German parentage had +won four or five. He once recited three thousand verses without +stopping; but the strain upon his mental faculties was too great, and +he was little better than an idiot from that day forth--a grievous +misfortune for the school, for on great occasions, before company, the +superintendent (as Tom expressed it) had always made this boy come out +and "spread himself." Only the older pupils managed to keep their +tickets and stick to their tedious work long enough to get a Bible, and +so the delivery of one of these prizes was a rare and noteworthy +circumstance; the successful pupil was so great and conspicuous for +that day that on the spot every scholar's heart was fired with a fresh +ambition that often lasted a couple of weeks. It is possible that Tom's +mental stomach had never really hungered for one of those prizes, but +unquestionably his entire being had for many a day longed for the glory +and the eclat that came with it. + +In due course the superintendent stood up in front of the pulpit, with +a closed hymn-book in his hand and his forefinger inserted between its +leaves, and commanded attention. When a Sunday-school superintendent +makes his customary little speech, a hymn-book in the hand is as +necessary as is the inevitable sheet of music in the hand of a singer +who stands forward on the platform and sings a solo at a concert +--though why, is a mystery: for neither the hymn-book nor the sheet of +music is ever referred to by the sufferer. 
This superintendent was a +slim creature of thirty-five, with a sandy goatee and short sandy hair; +he wore a stiff standing-collar whose upper edge almost reached his +ears and whose sharp points curved forward abreast the corners of his +mouth--a fence that compelled a straight lookout ahead, and a turning +of the whole body when a side view was required; his chin was propped +on a spreading cravat which was as broad and as long as a bank-note, +and had fringed ends; his boot toes were turned sharply up, in the +fashion of the day, like sleigh-runners--an effect patiently and +laboriously produced by the young men by sitting with their toes +pressed against a wall for hours together. Mr. Walters was very earnest +of mien, and very sincere and honest at heart; and he held sacred +things and places in such reverence, and so separated them from worldly +matters, that unconsciously to himself his Sunday-school voice had +acquired a peculiar intonation which was wholly absent on week-days. He +began after this fashion: + +"Now, children, I want you all to sit up just as straight and pretty +as you can and give me all your attention for a minute or two. There +--that is it. That is the way good little boys and girls should do. I see +one little girl who is looking out of the window--I am afraid she +thinks I am out there somewhere--perhaps up in one of the trees making +a speech to the little birds. [Applausive titter.] I want to tell you +how good it makes me feel to see so many bright, clean little faces +assembled in a place like this, learning to do right and be good." And +so forth and so on. It is not necessary to set down the rest of the +oration. It was of a pattern which does not vary, and so it is familiar +to us all. + +The latter third of the speech was marred by the resumption of fights +and other recreations among certain of the bad boys, and by fidgetings +and whisperings that extended far and wide, washing even to the bases +of isolated and incorruptible rocks like Sid and Mary. But now every +sound ceased suddenly, with the subsidence of Mr. Walters' voice, and +the conclusion of the speech was received with a burst of silent +gratitude. + +A good part of the whispering had been occasioned by an event which +was more or less rare--the entrance of visitors: lawyer Thatcher, +accompanied by a very feeble and aged man; a fine, portly, middle-aged +gentleman with iron-gray hair; and a dignified lady who was doubtless +the latter's wife. The lady was leading a child. Tom had been restless +and full of chafings and repinings; conscience-smitten, too--he could +not meet Amy Lawrence's eye, he could not brook her loving gaze. But +when he saw this small new-comer his soul was all ablaze with bliss in +a moment. The next moment he was "showing off" with all his might +--cuffing boys, pulling hair, making faces--in a word, using every art +that seemed likely to fascinate a girl and win her applause. His +exaltation had but one alloy--the memory of his humiliation in this +angel's garden--and that record in sand was fast washing out, under +the waves of happiness that were sweeping over it now. + +The visitors were given the highest seat of honor, and as soon as Mr. +Walters' speech was finished, he introduced them to the school. 
The +middle-aged man turned out to be a prodigious personage--no less a one +than the county judge--altogether the most august creation these +children had ever looked upon--and they wondered what kind of material +he was made of--and they half wanted to hear him roar, and were half +afraid he might, too. He was from Constantinople, twelve miles away--so +he had travelled, and seen the world--these very eyes had looked upon +the county court-house--which was said to have a tin roof. The awe +which these reflections inspired was attested by the impressive silence +and the ranks of staring eyes. This was the great Judge Thatcher, +brother of their own lawyer. Jeff Thatcher immediately went forward, to +be familiar with the great man and be envied by the school. It would +have been music to his soul to hear the whisperings: + +"Look at him, Jim! He's a going up there. Say--look! he's a going to +shake hands with him--he IS shaking hands with him! By jings, don't you +wish you was Jeff?" + +Mr. Walters fell to "showing off," with all sorts of official +bustlings and activities, giving orders, delivering judgments, +discharging directions here, there, everywhere that he could find a +target. The librarian "showed off"--running hither and thither with his +arms full of books and making a deal of the splutter and fuss that +insect authority delights in. The young lady teachers "showed off" +--bending sweetly over pupils that were lately being boxed, lifting +pretty warning fingers at bad little boys and patting good ones +lovingly. The young gentlemen teachers "showed off" with small +scoldings and other little displays of authority and fine attention to +discipline--and most of the teachers, of both sexes, found business up +at the library, by the pulpit; and it was business that frequently had +to be done over again two or three times (with much seeming vexation). +The little girls "showed off" in various ways, and the little boys +"showed off" with such diligence that the air was thick with paper wads +and the murmur of scufflings. And above it all the great man sat and +beamed a majestic judicial smile upon all the house, and warmed himself +in the sun of his own grandeur--for he was "showing off," too. + +There was only one thing wanting to make Mr. Walters' ecstasy +complete, and that was a chance to deliver a Bible-prize and exhibit a +prodigy. Several pupils had a few yellow tickets, but none had enough +--he had been around among the star pupils inquiring. He would have given +worlds, now, to have that German lad back again with a sound mind. + +And now at this moment, when hope was dead, Tom Sawyer came forward +with nine yellow tickets, nine red tickets, and ten blue ones, and +demanded a Bible. This was a thunderbolt out of a clear sky. Walters +was not expecting an application from this source for the next ten +years. But there was no getting around it--here were the certified +checks, and they were good for their face. Tom was therefore elevated +to a place with the Judge and the other elect, and the great news was +announced from headquarters. It was the most stunning surprise of the +decade, and so profound was the sensation that it lifted the new hero +up to the judicial one's altitude, and the school had two marvels to +gaze upon in place of one. 
The boys were all eaten up with envy--but +those that suffered the bitterest pangs were those who perceived too +late that they themselves had contributed to this hated splendor by +trading tickets to Tom for the wealth he had amassed in selling +whitewashing privileges. These despised themselves, as being the dupes +of a wily fraud, a guileful snake in the grass. + +The prize was delivered to Tom with as much effusion as the +superintendent could pump up under the circumstances; but it lacked +somewhat of the true gush, for the poor fellow's instinct taught him +that there was a mystery here that could not well bear the light, +perhaps; it was simply preposterous that this boy had warehoused two +thousand sheaves of Scriptural wisdom on his premises--a dozen would +strain his capacity, without a doubt. + +Amy Lawrence was proud and glad, and she tried to make Tom see it in +her face--but he wouldn't look. She wondered; then she was just a grain +troubled; next a dim suspicion came and went--came again; she watched; +a furtive glance told her worlds--and then her heart broke, and she was +jealous, and angry, and the tears came and she hated everybody. Tom +most of all (she thought). + +Tom was introduced to the Judge; but his tongue was tied, his breath +would hardly come, his heart quaked--partly because of the awful +greatness of the man, but mainly because he was her parent. He would +have liked to fall down and worship him, if it were in the dark. The +Judge put his hand on Tom's head and called him a fine little man, and +asked him what his name was. The boy stammered, gasped, and got it out: + +"Tom." + +"Oh, no, not Tom--it is--" + +"Thomas." + +"Ah, that's it. I thought there was more to it, maybe. That's very +well. But you've another one I daresay, and you'll tell it to me, won't +you?" + +"Tell the gentleman your other name, Thomas," said Walters, "and say +sir. You mustn't forget your manners." + +"Thomas Sawyer--sir." + +"That's it! That's a good boy. Fine boy. Fine, manly little fellow. +Two thousand verses is a great many--very, very great many. And you +never can be sorry for the trouble you took to learn them; for +knowledge is worth more than anything there is in the world; it's what +makes great men and good men; you'll be a great man and a good man +yourself, some day, Thomas, and then you'll look back and say, It's all +owing to the precious Sunday-school privileges of my boyhood--it's all +owing to my dear teachers that taught me to learn--it's all owing to +the good superintendent, who encouraged me, and watched over me, and +gave me a beautiful Bible--a splendid elegant Bible--to keep and have +it all for my own, always--it's all owing to right bringing up! That is +what you will say, Thomas--and you wouldn't take any money for those +two thousand verses--no indeed you wouldn't. And now you wouldn't mind +telling me and this lady some of the things you've learned--no, I know +you wouldn't--for we are proud of little boys that learn. Now, no +doubt you know the names of all the twelve disciples. Won't you tell us +the names of the first two that were appointed?" + +Tom was tugging at a button-hole and looking sheepish. He blushed, +now, and his eyes fell. Mr. Walters' heart sank within him. He said to +himself, it is not possible that the boy can answer the simplest +question--why DID the Judge ask him? Yet he felt obliged to speak up +and say: + +"Answer the gentleman, Thomas--don't be afraid." + +Tom still hung fire. + +"Now I know you'll tell me," said the lady. 
"The names of the first +two disciples were--" + +"DAVID AND GOLIAH!" + +Let us draw the curtain of charity over the rest of the scene. + + + +CHAPTER V + +ABOUT half-past ten the cracked bell of the small church began to +ring, and presently the people began to gather for the morning sermon. +The Sunday-school children distributed themselves about the house and +occupied pews with their parents, so as to be under supervision. Aunt +Polly came, and Tom and Sid and Mary sat with her--Tom being placed +next the aisle, in order that he might be as far away from the open +window and the seductive outside summer scenes as possible. The crowd +filed up the aisles: the aged and needy postmaster, who had seen better +days; the mayor and his wife--for they had a mayor there, among other +unnecessaries; the justice of the peace; the widow Douglass, fair, +smart, and forty, a generous, good-hearted soul and well-to-do, her +hill mansion the only palace in the town, and the most hospitable and +much the most lavish in the matter of festivities that St. Petersburg +could boast; the bent and venerable Major and Mrs. Ward; lawyer +Riverson, the new notable from a distance; next the belle of the +village, followed by a troop of lawn-clad and ribbon-decked young +heart-breakers; then all the young clerks in town in a body--for they +had stood in the vestibule sucking their cane-heads, a circling wall of +oiled and simpering admirers, till the last girl had run their gantlet; +and last of all came the Model Boy, Willie Mufferson, taking as heedful +care of his mother as if she were cut glass. He always brought his +mother to church, and was the pride of all the matrons. The boys all +hated him, he was so good. And besides, he had been "thrown up to them" +so much. His white handkerchief was hanging out of his pocket behind, as +usual on Sundays--accidentally. Tom had no handkerchief, and he looked +upon boys who had as snobs. + +The congregation being fully assembled, now, the bell rang once more, +to warn laggards and stragglers, and then a solemn hush fell upon the +church which was only broken by the tittering and whispering of the +choir in the gallery. The choir always tittered and whispered all +through service. There was once a church choir that was not ill-bred, +but I have forgotten where it was, now. It was a great many years ago, +and I can scarcely remember anything about it, but I think it was in +some foreign country. + +The minister gave out the hymn, and read it through with a relish, in +a peculiar style which was much admired in that part of the country. +His voice began on a medium key and climbed steadily up till it reached +a certain point, where it bore with strong emphasis upon the topmost +word and then plunged down as if from a spring-board: + + Shall I be car-ri-ed toe the skies, on flow'ry BEDS of ease, + + Whilst others fight to win the prize, and sail thro' BLOODY seas? + +He was regarded as a wonderful reader. At church "sociables" he was +always called upon to read poetry; and when he was through, the ladies +would lift up their hands and let them fall helplessly in their laps, +and "wall" their eyes, and shake their heads, as much as to say, "Words +cannot express it; it is too beautiful, TOO beautiful for this mortal +earth." + +After the hymn had been sung, the Rev. Mr. 
Sprague turned himself into +a bulletin-board, and read off "notices" of meetings and societies and +things till it seemed that the list would stretch out to the crack of +doom--a queer custom which is still kept up in America, even in cities, +away here in this age of abundant newspapers. Often, the less there is +to justify a traditional custom, the harder it is to get rid of it. + +And now the minister prayed. A good, generous prayer it was, and went +into details: it pleaded for the church, and the little children of the +church; for the other churches of the village; for the village itself; +for the county; for the State; for the State officers; for the United +States; for the churches of the United States; for Congress; for the +President; for the officers of the Government; for poor sailors, tossed +by stormy seas; for the oppressed millions groaning under the heel of +European monarchies and Oriental despotisms; for such as have the light +and the good tidings, and yet have not eyes to see nor ears to hear +withal; for the heathen in the far islands of the sea; and closed with +a supplication that the words he was about to speak might find grace +and favor, and be as seed sown in fertile ground, yielding in time a +grateful harvest of good. Amen. + +There was a rustling of dresses, and the standing congregation sat +down. The boy whose history this book relates did not enjoy the prayer, +he only endured it--if he even did that much. He was restive all +through it; he kept tally of the details of the prayer, unconsciously +--for he was not listening, but he knew the ground of old, and the +clergyman's regular route over it--and when a little trifle of new +matter was interlarded, his ear detected it and his whole nature +resented it; he considered additions unfair, and scoundrelly. In the +midst of the prayer a fly had lit on the back of the pew in front of +him and tortured his spirit by calmly rubbing its hands together, +embracing its head with its arms, and polishing it so vigorously that +it seemed to almost part company with the body, and the slender thread +of a neck was exposed to view; scraping its wings with its hind legs +and smoothing them to its body as if they had been coat-tails; going +through its whole toilet as tranquilly as if it knew it was perfectly +safe. As indeed it was; for as sorely as Tom's hands itched to grab for +it they did not dare--he believed his soul would be instantly destroyed +if he did such a thing while the prayer was going on. But with the +closing sentence his hand began to curve and steal forward; and the +instant the "Amen" was out the fly was a prisoner of war. His aunt +detected the act and made him let it go. + +The minister gave out his text and droned along monotonously through +an argument that was so prosy that many a head by and by began to nod +--and yet it was an argument that dealt in limitless fire and brimstone +and thinned the predestined elect down to a company so small as to be +hardly worth the saving. Tom counted the pages of the sermon; after +church he always knew how many pages there had been, but he seldom knew +anything else about the discourse. However, this time he was really +interested for a little while. The minister made a grand and moving +picture of the assembling together of the world's hosts at the +millennium when the lion and the lamb should lie down together and a +little child should lead them. 
But the pathos, the lesson, the moral of +the great spectacle were lost upon the boy; he only thought of the +conspicuousness of the principal character before the on-looking +nations; his face lit with the thought, and he said to himself that he +wished he could be that child, if it was a tame lion. + +Now he lapsed into suffering again, as the dry argument was resumed. +Presently he bethought him of a treasure he had and got it out. It was +a large black beetle with formidable jaws--a "pinchbug," he called it. +It was in a percussion-cap box. The first thing the beetle did was to +take him by the finger. A natural fillip followed, the beetle went +floundering into the aisle and lit on its back, and the hurt finger +went into the boy's mouth. The beetle lay there working its helpless +legs, unable to turn over. Tom eyed it, and longed for it; but it was +safe out of his reach. Other people uninterested in the sermon found +relief in the beetle, and they eyed it too. Presently a vagrant poodle +dog came idling along, sad at heart, lazy with the summer softness and +the quiet, weary of captivity, sighing for change. He spied the beetle; +the drooping tail lifted and wagged. He surveyed the prize; walked +around it; smelt at it from a safe distance; walked around it again; +grew bolder, and took a closer smell; then lifted his lip and made a +gingerly snatch at it, just missing it; made another, and another; +began to enjoy the diversion; subsided to his stomach with the beetle +between his paws, and continued his experiments; grew weary at last, +and then indifferent and absent-minded. His head nodded, and little by +little his chin descended and touched the enemy, who seized it. There +was a sharp yelp, a flirt of the poodle's head, and the beetle fell a +couple of yards away, and lit on its back once more. The neighboring +spectators shook with a gentle inward joy, several faces went behind +fans and handkerchiefs, and Tom was entirely happy. The dog looked +foolish, and probably felt so; but there was resentment in his heart, +too, and a craving for revenge. So he went to the beetle and began a +wary attack on it again; jumping at it from every point of a circle, +lighting with his fore-paws within an inch of the creature, making even +closer snatches at it with his teeth, and jerking his head till his +ears flapped again. But he grew tired once more, after a while; tried +to amuse himself with a fly but found no relief; followed an ant +around, with his nose close to the floor, and quickly wearied of that; +yawned, sighed, forgot the beetle entirely, and sat down on it. Then +there was a wild yelp of agony and the poodle went sailing up the +aisle; the yelps continued, and so did the dog; he crossed the house in +front of the altar; he flew down the other aisle; he crossed before the +doors; he clamored up the home-stretch; his anguish grew with his +progress, till presently he was but a woolly comet moving in its orbit +with the gleam and the speed of light. At last the frantic sufferer +sheered from its course, and sprang into its master's lap; he flung it +out of the window, and the voice of distress quickly thinned away and +died in the distance. + +By this time the whole church was red-faced and suffocating with +suppressed laughter, and the sermon had come to a dead standstill. 
The +discourse was resumed presently, but it went lame and halting, all +possibility of impressiveness being at an end; for even the gravest +sentiments were constantly being received with a smothered burst of +unholy mirth, under cover of some remote pew-back, as if the poor +parson had said a rarely facetious thing. It was a genuine relief to +the whole congregation when the ordeal was over and the benediction +pronounced. + +Tom Sawyer went home quite cheerful, thinking to himself that there +was some satisfaction about divine service when there was a bit of +variety in it. He had but one marring thought; he was willing that the +dog should play with his pinchbug, but he did not think it was upright +in him to carry it off. + + + +CHAPTER VI + +MONDAY morning found Tom Sawyer miserable. Monday morning always found +him so--because it began another week's slow suffering in school. He +generally began that day with wishing he had had no intervening +holiday, it made the going into captivity and fetters again so much +more odious. + +Tom lay thinking. Presently it occurred to him that he wished he was +sick; then he could stay home from school. Here was a vague +possibility. He canvassed his system. No ailment was found, and he +investigated again. This time he thought he could detect colicky +symptoms, and he began to encourage them with considerable hope. But +they soon grew feeble, and presently died wholly away. He reflected +further. Suddenly he discovered something. One of his upper front teeth +was loose. This was lucky; he was about to begin to groan, as a +"starter," as he called it, when it occurred to him that if he came +into court with that argument, his aunt would pull it out, and that +would hurt. So he thought he would hold the tooth in reserve for the +present, and seek further. Nothing offered for some little time, and +then he remembered hearing the doctor tell about a certain thing that +laid up a patient for two or three weeks and threatened to make him +lose a finger. So the boy eagerly drew his sore toe from under the +sheet and held it up for inspection. But now he did not know the +necessary symptoms. However, it seemed well worth while to chance it, +so he fell to groaning with considerable spirit. + +But Sid slept on unconscious. + +Tom groaned louder, and fancied that he began to feel pain in the toe. + +No result from Sid. + +Tom was panting with his exertions by this time. He took a rest and +then swelled himself up and fetched a succession of admirable groans. + +Sid snored on. + +Tom was aggravated. He said, "Sid, Sid!" and shook him. This course +worked well, and Tom began to groan again. Sid yawned, stretched, then +brought himself up on his elbow with a snort, and began to stare at +Tom. Tom went on groaning. Sid said: + +"Tom! Say, Tom!" [No response.] "Here, Tom! TOM! What is the matter, +Tom?" And he shook him and looked in his face anxiously. + +Tom moaned out: + +"Oh, don't, Sid. Don't joggle me." + +"Why, what's the matter, Tom? I must call auntie." + +"No--never mind. It'll be over by and by, maybe. Don't call anybody." + +"But I must! DON'T groan so, Tom, it's awful. How long you been this +way?" + +"Hours. Ouch! Oh, don't stir so, Sid, you'll kill me." + +"Tom, why didn't you wake me sooner? Oh, Tom, DON'T! It makes my +flesh crawl to hear you. Tom, what is the matter?" + +"I forgive you everything, Sid. [Groan.] Everything you've ever done +to me. When I'm gone--" + +"Oh, Tom, you ain't dying, are you? Don't, Tom--oh, don't. 
Maybe--" + +"I forgive everybody, Sid. [Groan.] Tell 'em so, Sid. And Sid, you +give my window-sash and my cat with one eye to that new girl that's +come to town, and tell her--" + +But Sid had snatched his clothes and gone. Tom was suffering in +reality, now, so handsomely was his imagination working, and so his +groans had gathered quite a genuine tone. + +Sid flew down-stairs and said: + +"Oh, Aunt Polly, come! Tom's dying!" + +"Dying!" + +"Yes'm. Don't wait--come quick!" + +"Rubbage! I don't believe it!" + +But she fled up-stairs, nevertheless, with Sid and Mary at her heels. +And her face grew white, too, and her lip trembled. When she reached +the bedside she gasped out: + +"You, Tom! Tom, what's the matter with you?" + +"Oh, auntie, I'm--" + +"What's the matter with you--what is the matter with you, child?" + +"Oh, auntie, my sore toe's mortified!" + +The old lady sank down into a chair and laughed a little, then cried a +little, then did both together. This restored her and she said: + +"Tom, what a turn you did give me. Now you shut up that nonsense and +climb out of this." + +The groans ceased and the pain vanished from the toe. The boy felt a +little foolish, and he said: + +"Aunt Polly, it SEEMED mortified, and it hurt so I never minded my +tooth at all." + +"Your tooth, indeed! What's the matter with your tooth?" + +"One of them's loose, and it aches perfectly awful." + +"There, there, now, don't begin that groaning again. Open your mouth. +Well--your tooth IS loose, but you're not going to die about that. +Mary, get me a silk thread, and a chunk of fire out of the kitchen." + +Tom said: + +"Oh, please, auntie, don't pull it out. It don't hurt any more. I wish +I may never stir if it does. Please don't, auntie. I don't want to stay +home from school." + +"Oh, you don't, don't you? So all this row was because you thought +you'd get to stay home from school and go a-fishing? Tom, Tom, I love +you so, and you seem to try every way you can to break my old heart +with your outrageousness." By this time the dental instruments were +ready. The old lady made one end of the silk thread fast to Tom's tooth +with a loop and tied the other to the bedpost. Then she seized the +chunk of fire and suddenly thrust it almost into the boy's face. The +tooth hung dangling by the bedpost, now. + +But all trials bring their compensations. As Tom wended to school +after breakfast, he was the envy of every boy he met because the gap in +his upper row of teeth enabled him to expectorate in a new and +admirable way. He gathered quite a following of lads interested in the +exhibition; and one that had cut his finger and had been a centre of +fascination and homage up to this time, now found himself suddenly +without an adherent, and shorn of his glory. His heart was heavy, and +he said with a disdain which he did not feel that it wasn't anything to +spit like Tom Sawyer; but another boy said, "Sour grapes!" and he +wandered away a dismantled hero. + +Shortly Tom came upon the juvenile pariah of the village, Huckleberry +Finn, son of the town drunkard. Huckleberry was cordially hated and +dreaded by all the mothers of the town, because he was idle and lawless +and vulgar and bad--and because all their children admired him so, and +delighted in his forbidden society, and wished they dared to be like +him. Tom was like the rest of the respectable boys, in that he envied +Huckleberry his gaudy outcast condition, and was under strict orders +not to play with him. 
So he played with him every time he got a chance. +Huckleberry was always dressed in the cast-off clothes of full-grown +men, and they were in perennial bloom and fluttering with rags. His hat +was a vast ruin with a wide crescent lopped out of its brim; his coat, +when he wore one, hung nearly to his heels and had the rearward buttons +far down the back; but one suspender supported his trousers; the seat +of the trousers bagged low and contained nothing, the fringed legs +dragged in the dirt when not rolled up. + +Huckleberry came and went, at his own free will. He slept on doorsteps +in fine weather and in empty hogsheads in wet; he did not have to go to +school or to church, or call any being master or obey anybody; he could +go fishing or swimming when and where he chose, and stay as long as it +suited him; nobody forbade him to fight; he could sit up as late as he +pleased; he was always the first boy that went barefoot in the spring +and the last to resume leather in the fall; he never had to wash, nor +put on clean clothes; he could swear wonderfully. In a word, everything +that goes to make life precious that boy had. So thought every +harassed, hampered, respectable boy in St. Petersburg. + +Tom hailed the romantic outcast: + +"Hello, Huckleberry!" + +"Hello yourself, and see how you like it." + +"What's that you got?" + +"Dead cat." + +"Lemme see him, Huck. My, he's pretty stiff. Where'd you get him?" + +"Bought him off'n a boy." + +"What did you give?" + +"I give a blue ticket and a bladder that I got at the slaughter-house." + +"Where'd you get the blue ticket?" + +"Bought it off'n Ben Rogers two weeks ago for a hoop-stick." + +"Say--what is dead cats good for, Huck?" + +"Good for? Cure warts with." + +"No! Is that so? I know something that's better." + +"I bet you don't. What is it?" + +"Why, spunk-water." + +"Spunk-water! I wouldn't give a dern for spunk-water." + +"You wouldn't, wouldn't you? D'you ever try it?" + +"No, I hain't. But Bob Tanner did." + +"Who told you so!" + +"Why, he told Jeff Thatcher, and Jeff told Johnny Baker, and Johnny +told Jim Hollis, and Jim told Ben Rogers, and Ben told a nigger, and +the nigger told me. There now!" + +"Well, what of it? They'll all lie. Leastways all but the nigger. I +don't know HIM. But I never see a nigger that WOULDN'T lie. Shucks! Now +you tell me how Bob Tanner done it, Huck." + +"Why, he took and dipped his hand in a rotten stump where the +rain-water was." + +"In the daytime?" + +"Certainly." + +"With his face to the stump?" + +"Yes. Least I reckon so." + +"Did he say anything?" + +"I don't reckon he did. I don't know." + +"Aha! Talk about trying to cure warts with spunk-water such a blame +fool way as that! Why, that ain't a-going to do any good. You got to go +all by yourself, to the middle of the woods, where you know there's a +spunk-water stump, and just as it's midnight you back up against the +stump and jam your hand in and say: + + 'Barley-corn, barley-corn, injun-meal shorts, + Spunk-water, spunk-water, swaller these warts,' + +and then walk away quick, eleven steps, with your eyes shut, and then +turn around three times and walk home without speaking to anybody. +Because if you speak the charm's busted." + +"Well, that sounds like a good way; but that ain't the way Bob Tanner +done." + +"No, sir, you can bet he didn't, becuz he's the wartiest boy in this +town; and he wouldn't have a wart on him if he'd knowed how to work +spunk-water. I've took off thousands of warts off of my hands that way, +Huck. 
I play with frogs so much that I've always got considerable many +warts. Sometimes I take 'em off with a bean." + +"Yes, bean's good. I've done that." + +"Have you? What's your way?" + +"You take and split the bean, and cut the wart so as to get some +blood, and then you put the blood on one piece of the bean and take and +dig a hole and bury it 'bout midnight at the crossroads in the dark of +the moon, and then you burn up the rest of the bean. You see that piece +that's got the blood on it will keep drawing and drawing, trying to +fetch the other piece to it, and so that helps the blood to draw the +wart, and pretty soon off she comes." + +"Yes, that's it, Huck--that's it; though when you're burying it if you +say 'Down bean; off wart; come no more to bother me!' it's better. +That's the way Joe Harper does, and he's been nearly to Coonville and +most everywheres. But say--how do you cure 'em with dead cats?" + +"Why, you take your cat and go and get in the graveyard 'long about +midnight when somebody that was wicked has been buried; and when it's +midnight a devil will come, or maybe two or three, but you can't see +'em, you can only hear something like the wind, or maybe hear 'em talk; +and when they're taking that feller away, you heave your cat after 'em +and say, 'Devil follow corpse, cat follow devil, warts follow cat, I'm +done with ye!' That'll fetch ANY wart." + +"Sounds right. D'you ever try it, Huck?" + +"No, but old Mother Hopkins told me." + +"Well, I reckon it's so, then. Becuz they say she's a witch." + +"Say! Why, Tom, I KNOW she is. She witched pap. Pap says so his own +self. He come along one day, and he see she was a-witching him, so he +took up a rock, and if she hadn't dodged, he'd a got her. Well, that +very night he rolled off'n a shed wher' he was a layin drunk, and broke +his arm." + +"Why, that's awful. How did he know she was a-witching him?" + +"Lord, pap can tell, easy. Pap says when they keep looking at you +right stiddy, they're a-witching you. Specially if they mumble. Becuz +when they mumble they're saying the Lord's Prayer backards." + +"Say, Hucky, when you going to try the cat?" + +"To-night. I reckon they'll come after old Hoss Williams to-night." + +"But they buried him Saturday. Didn't they get him Saturday night?" + +"Why, how you talk! How could their charms work till midnight?--and +THEN it's Sunday. Devils don't slosh around much of a Sunday, I don't +reckon." + +"I never thought of that. That's so. Lemme go with you?" + +"Of course--if you ain't afeard." + +"Afeard! 'Tain't likely. Will you meow?" + +"Yes--and you meow back, if you get a chance. Last time, you kep' me +a-meowing around till old Hays went to throwing rocks at me and says +'Dern that cat!' and so I hove a brick through his window--but don't +you tell." + +"I won't. I couldn't meow that night, becuz auntie was watching me, +but I'll meow this time. Say--what's that?" + +"Nothing but a tick." + +"Where'd you get him?" + +"Out in the woods." + +"What'll you take for him?" + +"I don't know. I don't want to sell him." + +"All right. It's a mighty small tick, anyway." + +"Oh, anybody can run a tick down that don't belong to them. I'm +satisfied with it. It's a good enough tick for me." + +"Sho, there's ticks a plenty. I could have a thousand of 'em if I +wanted to." + +"Well, why don't you? Becuz you know mighty well you can't. This is a +pretty early tick, I reckon. It's the first one I've seen this year." + +"Say, Huck--I'll give you my tooth for him." + +"Less see it." 
+ +Tom got out a bit of paper and carefully unrolled it. Huckleberry +viewed it wistfully. The temptation was very strong. At last he said: + +"Is it genuwyne?" + +Tom lifted his lip and showed the vacancy. + +"Well, all right," said Huckleberry, "it's a trade." + +Tom enclosed the tick in the percussion-cap box that had lately been +the pinchbug's prison, and the boys separated, each feeling wealthier +than before. + +When Tom reached the little isolated frame schoolhouse, he strode in +briskly, with the manner of one who had come with all honest speed. +He hung his hat on a peg and flung himself into his seat with +business-like alacrity. The master, throned on high in his great +splint-bottom arm-chair, was dozing, lulled by the drowsy hum of study. +The interruption roused him. + +"Thomas Sawyer!" + +Tom knew that when his name was pronounced in full, it meant trouble. + +"Sir!" + +"Come up here. Now, sir, why are you late again, as usual?" + +Tom was about to take refuge in a lie, when he saw two long tails of +yellow hair hanging down a back that he recognized by the electric +sympathy of love; and by that form was THE ONLY VACANT PLACE on the +girls' side of the schoolhouse. He instantly said: + +"I STOPPED TO TALK WITH HUCKLEBERRY FINN!" + +The master's pulse stood still, and he stared helplessly. The buzz of +study ceased. The pupils wondered if this foolhardy boy had lost his +mind. The master said: + +"You--you did what?" + +"Stopped to talk with Huckleberry Finn." + +There was no mistaking the words. + +"Thomas Sawyer, this is the most astounding confession I have ever +listened to. No mere ferule will answer for this offence. Take off your +jacket." + +The master's arm performed until it was tired and the stock of +switches notably diminished. Then the order followed: + +"Now, sir, go and sit with the girls! And let this be a warning to you." + +The titter that rippled around the room appeared to abash the boy, but +in reality that result was caused rather more by his worshipful awe of +his unknown idol and the dread pleasure that lay in his high good +fortune. He sat down upon the end of the pine bench and the girl +hitched herself away from him with a toss of her head. Nudges and winks +and whispers traversed the room, but Tom sat still, with his arms upon +the long, low desk before him, and seemed to study his book. + +By and by attention ceased from him, and the accustomed school murmur +rose upon the dull air once more. Presently the boy began to steal +furtive glances at the girl. She observed it, "made a mouth" at him and +gave him the back of her head for the space of a minute. When she +cautiously faced around again, a peach lay before her. She thrust it +away. Tom gently put it back. She thrust it away again, but with less +animosity. Tom patiently returned it to its place. Then she let it +remain. Tom scrawled on his slate, "Please take it--I got more." The +girl glanced at the words, but made no sign. Now the boy began to draw +something on the slate, hiding his work with his left hand. For a time +the girl refused to notice; but her human curiosity presently began to +manifest itself by hardly perceptible signs. The boy worked on, +apparently unconscious. The girl made a sort of noncommittal attempt to +see, but the boy did not betray that he was aware of it. At last she +gave in and hesitatingly whispered: + +"Let me see it." + +Tom partly uncovered a dismal caricature of a house with two gable +ends to it and a corkscrew of smoke issuing from the chimney. 
Then the +girl's interest began to fasten itself upon the work and she forgot +everything else. When it was finished, she gazed a moment, then +whispered: + +"It's nice--make a man." + +The artist erected a man in the front yard, that resembled a derrick. +He could have stepped over the house; but the girl was not +hypercritical; she was satisfied with the monster, and whispered: + +"It's a beautiful man--now make me coming along." + +Tom drew an hour-glass with a full moon and straw limbs to it and +armed the spreading fingers with a portentous fan. The girl said: + +"It's ever so nice--I wish I could draw." + +"It's easy," whispered Tom, "I'll learn you." + +"Oh, will you? When?" + +"At noon. Do you go home to dinner?" + +"I'll stay if you will." + +"Good--that's a whack. What's your name?" + +"Becky Thatcher. What's yours? Oh, I know. It's Thomas Sawyer." + +"That's the name they lick me by. I'm Tom when I'm good. You call me +Tom, will you?" + +"Yes." + +Now Tom began to scrawl something on the slate, hiding the words from +the girl. But she was not backward this time. She begged to see. Tom +said: + +"Oh, it ain't anything." + +"Yes it is." + +"No it ain't. You don't want to see." + +"Yes I do, indeed I do. Please let me." + +"You'll tell." + +"No I won't--deed and deed and double deed won't." + +"You won't tell anybody at all? Ever, as long as you live?" + +"No, I won't ever tell ANYbody. Now let me." + +"Oh, YOU don't want to see!" + +"Now that you treat me so, I WILL see." And she put her small hand +upon his and a little scuffle ensued, Tom pretending to resist in +earnest but letting his hand slip by degrees till these words were +revealed: "I LOVE YOU." + +"Oh, you bad thing!" And she hit his hand a smart rap, but reddened +and looked pleased, nevertheless. + +Just at this juncture the boy felt a slow, fateful grip closing on his +ear, and a steady lifting impulse. In that wise he was borne across the +house and deposited in his own seat, under a peppering fire of giggles +from the whole school. Then the master stood over him during a few +awful moments, and finally moved away to his throne without saying a +word. But although Tom's ear tingled, his heart was jubilant. + +As the school quieted down Tom made an honest effort to study, but the +turmoil within him was too great. In turn he took his place in the +reading class and made a botch of it; then in the geography class and +turned lakes into mountains, mountains into rivers, and rivers into +continents, till chaos was come again; then in the spelling class, and +got "turned down," by a succession of mere baby words, till he brought +up at the foot and yielded up the pewter medal which he had worn with +ostentation for months. + + + +CHAPTER VII + +THE harder Tom tried to fasten his mind on his book, the more his +ideas wandered. So at last, with a sigh and a yawn, he gave it up. It +seemed to him that the noon recess would never come. The air was +utterly dead. There was not a breath stirring. It was the sleepiest of +sleepy days. The drowsing murmur of the five and twenty studying +scholars soothed the soul like the spell that is in the murmur of bees. +Away off in the flaming sunshine, Cardiff Hill lifted its soft green +sides through a shimmering veil of heat, tinted with the purple of +distance; a few birds floated on lazy wing high in the air; no other +living thing was visible but some cows, and they were asleep. Tom's +heart ached to be free, or else to have something of interest to do to +pass the dreary time. 
His hand wandered into his pocket and his face +lit up with a glow of gratitude that was prayer, though he did not know +it. Then furtively the percussion-cap box came out. He released the +tick and put him on the long flat desk. The creature probably glowed +with a gratitude that amounted to prayer, too, at this moment, but it +was premature: for when he started thankfully to travel off, Tom turned +him aside with a pin and made him take a new direction. + +Tom's bosom friend sat next him, suffering just as Tom had been, and +now he was deeply and gratefully interested in this entertainment in an +instant. This bosom friend was Joe Harper. The two boys were sworn +friends all the week, and embattled enemies on Saturdays. Joe took a +pin out of his lapel and began to assist in exercising the prisoner. +The sport grew in interest momently. Soon Tom said that they were +interfering with each other, and neither getting the fullest benefit of +the tick. So he put Joe's slate on the desk and drew a line down the +middle of it from top to bottom. + +"Now," said he, "as long as he is on your side you can stir him up and +I'll let him alone; but if you let him get away and get on my side, +you're to leave him alone as long as I can keep him from crossing over." + +"All right, go ahead; start him up." + +The tick escaped from Tom, presently, and crossed the equator. Joe +harassed him awhile, and then he got away and crossed back again. This +change of base occurred often. While one boy was worrying the tick with +absorbing interest, the other would look on with interest as strong, +the two heads bowed together over the slate, and the two souls dead to +all things else. At last luck seemed to settle and abide with Joe. The +tick tried this, that, and the other course, and got as excited and as +anxious as the boys themselves, but time and again just as he would +have victory in his very grasp, so to speak, and Tom's fingers would be +twitching to begin, Joe's pin would deftly head him off, and keep +possession. At last Tom could stand it no longer. The temptation was +too strong. So he reached out and lent a hand with his pin. Joe was +angry in a moment. Said he: + +"Tom, you let him alone." + +"I only just want to stir him up a little, Joe." + +"No, sir, it ain't fair; you just let him alone." + +"Blame it, I ain't going to stir him much." + +"Let him alone, I tell you." + +"I won't!" + +"You shall--he's on my side of the line." + +"Look here, Joe Harper, whose is that tick?" + +"I don't care whose tick he is--he's on my side of the line, and you +sha'n't touch him." + +"Well, I'll just bet I will, though. He's my tick and I'll do what I +blame please with him, or die!" + +A tremendous whack came down on Tom's shoulders, and its duplicate on +Joe's; and for the space of two minutes the dust continued to fly from +the two jackets and the whole school to enjoy it. The boys had been too +absorbed to notice the hush that had stolen upon the school awhile +before when the master came tiptoeing down the room and stood over +them. He had contemplated a good part of the performance before he +contributed his bit of variety to it. + +When school broke up at noon, Tom flew to Becky Thatcher, and +whispered in her ear: + +"Put on your bonnet and let on you're going home; and when you get to +the corner, give the rest of 'em the slip, and turn down through the +lane and come back. I'll go the other way and come it over 'em the same +way." 
+ +So the one went off with one group of scholars, and the other with +another. In a little while the two met at the bottom of the lane, and +when they reached the school they had it all to themselves. Then they +sat together, with a slate before them, and Tom gave Becky the pencil +and held her hand in his, guiding it, and so created another surprising +house. When the interest in art began to wane, the two fell to talking. +Tom was swimming in bliss. He said: + +"Do you love rats?" + +"No! I hate them!" + +"Well, I do, too--LIVE ones. But I mean dead ones, to swing round your +head with a string." + +"No, I don't care for rats much, anyway. What I like is chewing-gum." + +"Oh, I should say so! I wish I had some now." + +"Do you? I've got some. I'll let you chew it awhile, but you must give +it back to me." + +That was agreeable, so they chewed it turn about, and dangled their +legs against the bench in excess of contentment. + +"Was you ever at a circus?" said Tom. + +"Yes, and my pa's going to take me again some time, if I'm good." + +"I been to the circus three or four times--lots of times. Church ain't +shucks to a circus. There's things going on at a circus all the time. +I'm going to be a clown in a circus when I grow up." + +"Oh, are you! That will be nice. They're so lovely, all spotted up." + +"Yes, that's so. And they get slathers of money--most a dollar a day, +Ben Rogers says. Say, Becky, was you ever engaged?" + +"What's that?" + +"Why, engaged to be married." + +"No." + +"Would you like to?" + +"I reckon so. I don't know. What is it like?" + +"Like? Why it ain't like anything. You only just tell a boy you won't +ever have anybody but him, ever ever ever, and then you kiss and that's +all. Anybody can do it." + +"Kiss? What do you kiss for?" + +"Why, that, you know, is to--well, they always do that." + +"Everybody?" + +"Why, yes, everybody that's in love with each other. Do you remember +what I wrote on the slate?" + +"Ye--yes." + +"What was it?" + +"I sha'n't tell you." + +"Shall I tell YOU?" + +"Ye--yes--but some other time." + +"No, now." + +"No, not now--to-morrow." + +"Oh, no, NOW. Please, Becky--I'll whisper it, I'll whisper it ever so +easy." + +Becky hesitating, Tom took silence for consent, and passed his arm +about her waist and whispered the tale ever so softly, with his mouth +close to her ear. And then he added: + +"Now you whisper it to me--just the same." + +She resisted, for a while, and then said: + +"You turn your face away so you can't see, and then I will. But you +mustn't ever tell anybody--WILL you, Tom? Now you won't, WILL you?" + +"No, indeed, indeed I won't. Now, Becky." + +He turned his face away. She bent timidly around till her breath +stirred his curls and whispered, "I--love--you!" + +Then she sprang away and ran around and around the desks and benches, +with Tom after her, and took refuge in a corner at last, with her +little white apron to her face. Tom clasped her about her neck and +pleaded: + +"Now, Becky, it's all done--all over but the kiss. Don't you be afraid +of that--it ain't anything at all. Please, Becky." And he tugged at her +apron and the hands. + +By and by she gave up, and let her hands drop; her face, all glowing +with the struggle, came up and submitted. Tom kissed the red lips and +said: + +"Now it's all done, Becky. And always after this, you know, you ain't +ever to love anybody but me, and you ain't ever to marry anybody but +me, ever never and forever. Will you?" 
+ +"No, I'll never love anybody but you, Tom, and I'll never marry +anybody but you--and you ain't to ever marry anybody but me, either." + +"Certainly. Of course. That's PART of it. And always coming to school +or when we're going home, you're to walk with me, when there ain't +anybody looking--and you choose me and I choose you at parties, because +that's the way you do when you're engaged." + +"It's so nice. I never heard of it before." + +"Oh, it's ever so gay! Why, me and Amy Lawrence--" + +The big eyes told Tom his blunder and he stopped, confused. + +"Oh, Tom! Then I ain't the first you've ever been engaged to!" + +The child began to cry. Tom said: + +"Oh, don't cry, Becky, I don't care for her any more." + +"Yes, you do, Tom--you know you do." + +Tom tried to put his arm about her neck, but she pushed him away and +turned her face to the wall, and went on crying. Tom tried again, with +soothing words in his mouth, and was repulsed again. Then his pride was +up, and he strode away and went outside. He stood about, restless and +uneasy, for a while, glancing at the door, every now and then, hoping +she would repent and come to find him. But she did not. Then he began +to feel badly and fear that he was in the wrong. It was a hard struggle +with him to make new advances, now, but he nerved himself to it and +entered. She was still standing back there in the corner, sobbing, with +her face to the wall. Tom's heart smote him. He went to her and stood a +moment, not knowing exactly how to proceed. Then he said hesitatingly: + +"Becky, I--I don't care for anybody but you." + +No reply--but sobs. + +"Becky"--pleadingly. "Becky, won't you say something?" + +More sobs. + +Tom got out his chiefest jewel, a brass knob from the top of an +andiron, and passed it around her so that she could see it, and said: + +"Please, Becky, won't you take it?" + +She struck it to the floor. Then Tom marched out of the house and over +the hills and far away, to return to school no more that day. Presently +Becky began to suspect. She ran to the door; he was not in sight; she +flew around to the play-yard; he was not there. Then she called: + +"Tom! Come back, Tom!" + +She listened intently, but there was no answer. She had no companions +but silence and loneliness. So she sat down to cry again and upbraid +herself; and by this time the scholars began to gather again, and she +had to hide her griefs and still her broken heart and take up the cross +of a long, dreary, aching afternoon, with none among the strangers +about her to exchange sorrows with. + + + +CHAPTER VIII + +TOM dodged hither and thither through lanes until he was well out of +the track of returning scholars, and then fell into a moody jog. He +crossed a small "branch" two or three times, because of a prevailing +juvenile superstition that to cross water baffled pursuit. Half an hour +later he was disappearing behind the Douglas mansion on the summit of +Cardiff Hill, and the schoolhouse was hardly distinguishable away off +in the valley behind him. He entered a dense wood, picked his pathless +way to the centre of it, and sat down on a mossy spot under a spreading +oak. There was not even a zephyr stirring; the dead noonday heat had +even stilled the songs of the birds; nature lay in a trance that was +broken by no sound but the occasional far-off hammering of a +woodpecker, and this seemed to render the pervading silence and sense +of loneliness the more profound. 
The boy's soul was steeped in +melancholy; his feelings were in happy accord with his surroundings. He +sat long with his elbows on his knees and his chin in his hands, +meditating. It seemed to him that life was but a trouble, at best, and +he more than half envied Jimmy Hodges, so lately released; it must be +very peaceful, he thought, to lie and slumber and dream forever and +ever, with the wind whispering through the trees and caressing the +grass and the flowers over the grave, and nothing to bother and grieve +about, ever any more. If he only had a clean Sunday-school record he +could be willing to go, and be done with it all. Now as to this girl. +What had he done? Nothing. He had meant the best in the world, and been +treated like a dog--like a very dog. She would be sorry some day--maybe +when it was too late. Ah, if he could only die TEMPORARILY! + +But the elastic heart of youth cannot be compressed into one +constrained shape long at a time. Tom presently began to drift +insensibly back into the concerns of this life again. What if he turned +his back, now, and disappeared mysteriously? What if he went away--ever +so far away, into unknown countries beyond the seas--and never came +back any more! How would she feel then! The idea of being a clown +recurred to him now, only to fill him with disgust. For frivolity and +jokes and spotted tights were an offense, when they intruded themselves +upon a spirit that was exalted into the vague august realm of the +romantic. No, he would be a soldier, and return after long years, all +war-worn and illustrious. No--better still, he would join the Indians, +and hunt buffaloes and go on the warpath in the mountain ranges and the +trackless great plains of the Far West, and away in the future come +back a great chief, bristling with feathers, hideous with paint, and +prance into Sunday-school, some drowsy summer morning, with a +bloodcurdling war-whoop, and sear the eyeballs of all his companions +with unappeasable envy. But no, there was something gaudier even than +this. He would be a pirate! That was it! NOW his future lay plain +before him, and glowing with unimaginable splendor. How his name would +fill the world, and make people shudder! How gloriously he would go +plowing the dancing seas, in his long, low, black-hulled racer, the +Spirit of the Storm, with his grisly flag flying at the fore! And at +the zenith of his fame, how he would suddenly appear at the old village +and stalk into church, brown and weather-beaten, in his black velvet +doublet and trunks, his great jack-boots, his crimson sash, his belt +bristling with horse-pistols, his crime-rusted cutlass at his side, his +slouch hat with waving plumes, his black flag unfurled, with the skull +and crossbones on it, and hear with swelling ecstasy the whisperings, +"It's Tom Sawyer the Pirate!--the Black Avenger of the Spanish Main!" + +Yes, it was settled; his career was determined. He would run away from +home and enter upon it. He would start the very next morning. Therefore +he must now begin to get ready. He would collect his resources +together. He went to a rotten log near at hand and began to dig under +one end of it with his Barlow knife. He soon struck wood that sounded +hollow. He put his hand there and uttered this incantation impressively: + +"What hasn't come here, come! What's here, stay here!" + +Then he scraped away the dirt, and exposed a pine shingle. He took it +up and disclosed a shapely little treasure-house whose bottom and sides +were of shingles. 
In it lay a marble. Tom's astonishment was boundless! +He scratched his head with a perplexed air, and said: + +"Well, that beats anything!" + +Then he tossed the marble away pettishly, and stood cogitating. The +truth was, that a superstition of his had failed, here, which he and +all his comrades had always looked upon as infallible. If you buried a +marble with certain necessary incantations, and left it alone a +fortnight, and then opened the place with the incantation he had just +used, you would find that all the marbles you had ever lost had +gathered themselves together there, meantime, no matter how widely they +had been separated. But now, this thing had actually and unquestionably +failed. Tom's whole structure of faith was shaken to its foundations. +He had many a time heard of this thing succeeding but never of its +failing before. It did not occur to him that he had tried it several +times before, himself, but could never find the hiding-places +afterward. He puzzled over the matter some time, and finally decided +that some witch had interfered and broken the charm. He thought he +would satisfy himself on that point; so he searched around till he +found a small sandy spot with a little funnel-shaped depression in it. +He laid himself down and put his mouth close to this depression and +called-- + +"Doodle-bug, doodle-bug, tell me what I want to know! Doodle-bug, +doodle-bug, tell me what I want to know!" + +The sand began to work, and presently a small black bug appeared for a +second and then darted under again in a fright. + +"He dasn't tell! So it WAS a witch that done it. I just knowed it." + +He well knew the futility of trying to contend against witches, so he +gave up discouraged. But it occurred to him that he might as well have +the marble he had just thrown away, and therefore he went and made a +patient search for it. But he could not find it. Now he went back to +his treasure-house and carefully placed himself just as he had been +standing when he tossed the marble away; then he took another marble +from his pocket and tossed it in the same way, saying: + +"Brother, go find your brother!" + +He watched where it stopped, and went there and looked. But it must +have fallen short or gone too far; so he tried twice more. The last +repetition was successful. The two marbles lay within a foot of each +other. + +Just here the blast of a toy tin trumpet came faintly down the green +aisles of the forest. Tom flung off his jacket and trousers, turned a +suspender into a belt, raked away some brush behind the rotten log, +disclosing a rude bow and arrow, a lath sword and a tin trumpet, and in +a moment had seized these things and bounded away, barelegged, with +fluttering shirt. He presently halted under a great elm, blew an +answering blast, and then began to tiptoe and look warily out, this way +and that. He said cautiously--to an imaginary company: + +"Hold, my merry men! Keep hid till I blow." + +Now appeared Joe Harper, as airily clad and elaborately armed as Tom. +Tom called: + +"Hold! Who comes here into Sherwood Forest without my pass?" + +"Guy of Guisborne wants no man's pass. Who art thou that--that--" + +"Dares to hold such language," said Tom, prompting--for they talked +"by the book," from memory. + +"Who art thou that dares to hold such language?" + +"I, indeed! I am Robin Hood, as thy caitiff carcase soon shall know." + +"Then art thou indeed that famous outlaw? Right gladly will I dispute +with thee the passes of the merry wood. Have at thee!" 
+ +They took their lath swords, dumped their other traps on the ground, +struck a fencing attitude, foot to foot, and began a grave, careful +combat, "two up and two down." Presently Tom said: + +"Now, if you've got the hang, go it lively!" + +So they "went it lively," panting and perspiring with the work. By and +by Tom shouted: + +"Fall! fall! Why don't you fall?" + +"I sha'n't! Why don't you fall yourself? You're getting the worst of +it." + +"Why, that ain't anything. I can't fall; that ain't the way it is in +the book. The book says, 'Then with one back-handed stroke he slew poor +Guy of Guisborne.' You're to turn around and let me hit you in the +back." + +There was no getting around the authorities, so Joe turned, received +the whack and fell. + +"Now," said Joe, getting up, "you got to let me kill YOU. That's fair." + +"Why, I can't do that, it ain't in the book." + +"Well, it's blamed mean--that's all." + +"Well, say, Joe, you can be Friar Tuck or Much the miller's son, and +lam me with a quarter-staff; or I'll be the Sheriff of Nottingham and +you be Robin Hood a little while and kill me." + +This was satisfactory, and so these adventures were carried out. Then +Tom became Robin Hood again, and was allowed by the treacherous nun to +bleed his strength away through his neglected wound. And at last Joe, +representing a whole tribe of weeping outlaws, dragged him sadly forth, +gave his bow into his feeble hands, and Tom said, "Where this arrow +falls, there bury poor Robin Hood under the greenwood tree." Then he +shot the arrow and fell back and would have died, but he lit on a +nettle and sprang up too gaily for a corpse. + +The boys dressed themselves, hid their accoutrements, and went off +grieving that there were no outlaws any more, and wondering what modern +civilization could claim to have done to compensate for their loss. +They said they would rather be outlaws a year in Sherwood Forest than +President of the United States forever. + + + +CHAPTER IX + +AT half-past nine, that night, Tom and Sid were sent to bed, as usual. +They said their prayers, and Sid was soon asleep. Tom lay awake and +waited, in restless impatience. When it seemed to him that it must be +nearly daylight, he heard the clock strike ten! This was despair. He +would have tossed and fidgeted, as his nerves demanded, but he was +afraid he might wake Sid. So he lay still, and stared up into the dark. +Everything was dismally still. By and by, out of the stillness, little, +scarcely perceptible noises began to emphasize themselves. The ticking +of the clock began to bring itself into notice. Old beams began to +crack mysteriously. The stairs creaked faintly. Evidently spirits were +abroad. A measured, muffled snore issued from Aunt Polly's chamber. And +now the tiresome chirping of a cricket that no human ingenuity could +locate, began. Next the ghastly ticking of a deathwatch in the wall at +the bed's head made Tom shudder--it meant that somebody's days were +numbered. Then the howl of a far-off dog rose on the night air, and was +answered by a fainter howl from a remoter distance. Tom was in an +agony. At last he was satisfied that time had ceased and eternity +begun; he began to doze, in spite of himself; the clock chimed eleven, +but he did not hear it. And then there came, mingling with his +half-formed dreams, a most melancholy caterwauling. The raising of a +neighboring window disturbed him. A cry of "Scat! you devil!" 
and the +crash of an empty bottle against the back of his aunt's woodshed +brought him wide awake, and a single minute later he was dressed and +out of the window and creeping along the roof of the "ell" on all +fours. He "meow'd" with caution once or twice, as he went; then jumped +to the roof of the woodshed and thence to the ground. Huckleberry Finn +was there, with his dead cat. The boys moved off and disappeared in the +gloom. At the end of half an hour they were wading through the tall +grass of the graveyard. + +It was a graveyard of the old-fashioned Western kind. It was on a +hill, about a mile and a half from the village. It had a crazy board +fence around it, which leaned inward in places, and outward the rest of +the time, but stood upright nowhere. Grass and weeds grew rank over the +whole cemetery. All the old graves were sunken in, there was not a +tombstone on the place; round-topped, worm-eaten boards staggered over +the graves, leaning for support and finding none. "Sacred to the memory +of" So-and-So had been painted on them once, but it could no longer +have been read, on the most of them, now, even if there had been light. + +A faint wind moaned through the trees, and Tom feared it might be the +spirits of the dead, complaining at being disturbed. The boys talked +little, and only under their breath, for the time and the place and the +pervading solemnity and silence oppressed their spirits. They found the +sharp new heap they were seeking, and ensconced themselves within the +protection of three great elms that grew in a bunch within a few feet +of the grave. + +Then they waited in silence for what seemed a long time. The hooting +of a distant owl was all the sound that troubled the dead stillness. +Tom's reflections grew oppressive. He must force some talk. So he said +in a whisper: + +"Hucky, do you believe the dead people like it for us to be here?" + +Huckleberry whispered: + +"I wisht I knowed. It's awful solemn like, AIN'T it?" + +"I bet it is." + +There was a considerable pause, while the boys canvassed this matter +inwardly. Then Tom whispered: + +"Say, Hucky--do you reckon Hoss Williams hears us talking?" + +"O' course he does. Least his sperrit does." + +Tom, after a pause: + +"I wish I'd said Mister Williams. But I never meant any harm. +Everybody calls him Hoss." + +"A body can't be too partic'lar how they talk 'bout these-yer dead +people, Tom." + +This was a damper, and conversation died again. + +Presently Tom seized his comrade's arm and said: + +"Sh!" + +"What is it, Tom?" And the two clung together with beating hearts. + +"Sh! There 'tis again! Didn't you hear it?" + +"I--" + +"There! Now you hear it." + +"Lord, Tom, they're coming! They're coming, sure. What'll we do?" + +"I dono. Think they'll see us?" + +"Oh, Tom, they can see in the dark, same as cats. I wisht I hadn't +come." + +"Oh, don't be afeard. I don't believe they'll bother us. We ain't +doing any harm. If we keep perfectly still, maybe they won't notice us +at all." + +"I'll try to, Tom, but, Lord, I'm all of a shiver." + +"Listen!" + +The boys bent their heads together and scarcely breathed. A muffled +sound of voices floated up from the far end of the graveyard. + +"Look! See there!" whispered Tom. "What is it?" + +"It's devil-fire. Oh, Tom, this is awful." + +Some vague figures approached through the gloom, swinging an +old-fashioned tin lantern that freckled the ground with innumerable +little spangles of light. 
Presently Huckleberry whispered with a +shudder: + +"It's the devils sure enough. Three of 'em! Lordy, Tom, we're goners! +Can you pray?" + +"I'll try, but don't you be afeard. They ain't going to hurt us. 'Now +I lay me down to sleep, I--'" + +"Sh!" + +"What is it, Huck?" + +"They're HUMANS! One of 'em is, anyway. One of 'em's old Muff Potter's +voice." + +"No--'tain't so, is it?" + +"I bet I know it. Don't you stir nor budge. He ain't sharp enough to +notice us. Drunk, the same as usual, likely--blamed old rip!" + +"All right, I'll keep still. Now they're stuck. Can't find it. Here +they come again. Now they're hot. Cold again. Hot again. Red hot! +They're p'inted right, this time. Say, Huck, I know another o' them +voices; it's Injun Joe." + +"That's so--that murderin' half-breed! I'd druther they was devils a +dern sight. What kin they be up to?" + +The whisper died wholly out, now, for the three men had reached the +grave and stood within a few feet of the boys' hiding-place. + +"Here it is," said the third voice; and the owner of it held the +lantern up and revealed the face of young Doctor Robinson. + +Potter and Injun Joe were carrying a handbarrow with a rope and a +couple of shovels on it. They cast down their load and began to open +the grave. The doctor put the lantern at the head of the grave and came +and sat down with his back against one of the elm trees. He was so +close the boys could have touched him. + +"Hurry, men!" he said, in a low voice; "the moon might come out at any +moment." + +They growled a response and went on digging. For some time there was +no noise but the grating sound of the spades discharging their freight +of mould and gravel. It was very monotonous. Finally a spade struck +upon the coffin with a dull woody accent, and within another minute or +two the men had hoisted it out on the ground. They pried off the lid +with their shovels, got out the body and dumped it rudely on the +ground. The moon drifted from behind the clouds and exposed the pallid +face. The barrow was got ready and the corpse placed on it, covered +with a blanket, and bound to its place with the rope. Potter took out a +large spring-knife and cut off the dangling end of the rope and then +said: + +"Now the cussed thing's ready, Sawbones, and you'll just out with +another five, or here she stays." + +"That's the talk!" said Injun Joe. + +"Look here, what does this mean?" said the doctor. "You required your +pay in advance, and I've paid you." + +"Yes, and you done more than that," said Injun Joe, approaching the +doctor, who was now standing. "Five years ago you drove me away from +your father's kitchen one night, when I come to ask for something to +eat, and you said I warn't there for any good; and when I swore I'd get +even with you if it took a hundred years, your father had me jailed for +a vagrant. Did you think I'd forget? The Injun blood ain't in me for +nothing. And now I've GOT you, and you got to SETTLE, you know!" + +He was threatening the doctor, with his fist in his face, by this +time. The doctor struck out suddenly and stretched the ruffian on the +ground. Potter dropped his knife, and exclaimed: + +"Here, now, don't you hit my pard!" and the next moment he had +grappled with the doctor and the two were struggling with might and +main, trampling the grass and tearing the ground with their heels. 
+Injun Joe sprang to his feet, his eyes flaming with passion, snatched +up Potter's knife, and went creeping, catlike and stooping, round and +round about the combatants, seeking an opportunity. All at once the +doctor flung himself free, seized the heavy headboard of Williams' +grave and felled Potter to the earth with it--and in the same instant +the half-breed saw his chance and drove the knife to the hilt in the +young man's breast. He reeled and fell partly upon Potter, flooding him +with his blood, and in the same moment the clouds blotted out the +dreadful spectacle and the two frightened boys went speeding away in +the dark. + +Presently, when the moon emerged again, Injun Joe was standing over +the two forms, contemplating them. The doctor murmured inarticulately, +gave a long gasp or two and was still. The half-breed muttered: + +"THAT score is settled--damn you." + +Then he robbed the body. After which he put the fatal knife in +Potter's open right hand, and sat down on the dismantled coffin. Three +--four--five minutes passed, and then Potter began to stir and moan. His +hand closed upon the knife; he raised it, glanced at it, and let it +fall, with a shudder. Then he sat up, pushing the body from him, and +gazed at it, and then around him, confusedly. His eyes met Joe's. + +"Lord, how is this, Joe?" he said. + +"It's a dirty business," said Joe, without moving. + +"What did you do it for?" + +"I! I never done it!" + +"Look here! That kind of talk won't wash." + +Potter trembled and grew white. + +"I thought I'd got sober. I'd no business to drink to-night. But it's +in my head yet--worse'n when we started here. I'm all in a muddle; +can't recollect anything of it, hardly. Tell me, Joe--HONEST, now, old +feller--did I do it? Joe, I never meant to--'pon my soul and honor, I +never meant to, Joe. Tell me how it was, Joe. Oh, it's awful--and him +so young and promising." + +"Why, you two was scuffling, and he fetched you one with the headboard +and you fell flat; and then up you come, all reeling and staggering +like, and snatched the knife and jammed it into him, just as he fetched +you another awful clip--and here you've laid, as dead as a wedge til +now." + +"Oh, I didn't know what I was a-doing. I wish I may die this minute if +I did. It was all on account of the whiskey and the excitement, I +reckon. I never used a weepon in my life before, Joe. I've fought, but +never with weepons. They'll all say that. Joe, don't tell! Say you +won't tell, Joe--that's a good feller. I always liked you, Joe, and +stood up for you, too. Don't you remember? You WON'T tell, WILL you, +Joe?" And the poor creature dropped on his knees before the stolid +murderer, and clasped his appealing hands. + +"No, you've always been fair and square with me, Muff Potter, and I +won't go back on you. There, now, that's as fair as a man can say." + +"Oh, Joe, you're an angel. I'll bless you for this the longest day I +live." And Potter began to cry. + +"Come, now, that's enough of that. This ain't any time for blubbering. +You be off yonder way and I'll go this. Move, now, and don't leave any +tracks behind you." + +Potter started on a trot that quickly increased to a run. The +half-breed stood looking after him. He muttered: + +"If he's as much stunned with the lick and fuddled with the rum as he +had the look of being, he won't think of the knife till he's gone so +far he'll be afraid to come back after it to such a place by himself +--chicken-heart!" 
+ +Two or three minutes later the murdered man, the blanketed corpse, the +lidless coffin, and the open grave were under no inspection but the +moon's. The stillness was complete again, too. + + + +CHAPTER X + +THE two boys flew on and on, toward the village, speechless with +horror. They glanced backward over their shoulders from time to time, +apprehensively, as if they feared they might be followed. Every stump +that started up in their path seemed a man and an enemy, and made them +catch their breath; and as they sped by some outlying cottages that lay +near the village, the barking of the aroused watch-dogs seemed to give +wings to their feet. + +"If we can only get to the old tannery before we break down!" +whispered Tom, in short catches between breaths. "I can't stand it much +longer." + +Huckleberry's hard pantings were his only reply, and the boys fixed +their eyes on the goal of their hopes and bent to their work to win it. +They gained steadily on it, and at last, breast to breast, they burst +through the open door and fell grateful and exhausted in the sheltering +shadows beyond. By and by their pulses slowed down, and Tom whispered: + +"Huckleberry, what do you reckon'll come of this?" + +"If Doctor Robinson dies, I reckon hanging'll come of it." + +"Do you though?" + +"Why, I KNOW it, Tom." + +Tom thought a while, then he said: + +"Who'll tell? We?" + +"What are you talking about? S'pose something happened and Injun Joe +DIDN'T hang? Why, he'd kill us some time or other, just as dead sure as +we're a laying here." + +"That's just what I was thinking to myself, Huck." + +"If anybody tells, let Muff Potter do it, if he's fool enough. He's +generally drunk enough." + +Tom said nothing--went on thinking. Presently he whispered: + +"Huck, Muff Potter don't know it. How can he tell?" + +"What's the reason he don't know it?" + +"Because he'd just got that whack when Injun Joe done it. D'you reckon +he could see anything? D'you reckon he knowed anything?" + +"By hokey, that's so, Tom!" + +"And besides, look-a-here--maybe that whack done for HIM!" + +"No, 'taint likely, Tom. He had liquor in him; I could see that; and +besides, he always has. Well, when pap's full, you might take and belt +him over the head with a church and you couldn't phase him. He says so, +his own self. So it's the same with Muff Potter, of course. But if a +man was dead sober, I reckon maybe that whack might fetch him; I dono." + +After another reflective silence, Tom said: + +"Hucky, you sure you can keep mum?" + +"Tom, we GOT to keep mum. You know that. That Injun devil wouldn't +make any more of drownding us than a couple of cats, if we was to +squeak 'bout this and they didn't hang him. Now, look-a-here, Tom, less +take and swear to one another--that's what we got to do--swear to keep +mum." + +"I'm agreed. It's the best thing. Would you just hold hands and swear +that we--" + +"Oh no, that wouldn't do for this. That's good enough for little +rubbishy common things--specially with gals, cuz THEY go back on you +anyway, and blab if they get in a huff--but there orter be writing +'bout a big thing like this. And blood." + +Tom's whole being applauded this idea. It was deep, and dark, and +awful; the hour, the circumstances, the surroundings, were in keeping +with it. 
He picked up a clean pine shingle that lay in the moonlight, +took a little fragment of "red keel" out of his pocket, got the moon on +his work, and painfully scrawled these lines, emphasizing each slow +down-stroke by clamping his tongue between his teeth, and letting up +the pressure on the up-strokes. [See next page.] + + "Huck Finn and + Tom Sawyer swears + they will keep mum + about This and They + wish They may Drop + down dead in Their + Tracks if They ever + Tell and Rot." + +Huckleberry was filled with admiration of Tom's facility in writing, +and the sublimity of his language. He at once took a pin from his lapel +and was going to prick his flesh, but Tom said: + +"Hold on! Don't do that. A pin's brass. It might have verdigrease on +it." + +"What's verdigrease?" + +"It's p'ison. That's what it is. You just swaller some of it once +--you'll see." + +So Tom unwound the thread from one of his needles, and each boy +pricked the ball of his thumb and squeezed out a drop of blood. In +time, after many squeezes, Tom managed to sign his initials, using the +ball of his little finger for a pen. Then he showed Huckleberry how to +make an H and an F, and the oath was complete. They buried the shingle +close to the wall, with some dismal ceremonies and incantations, and +the fetters that bound their tongues were considered to be locked and +the key thrown away. + +A figure crept stealthily through a break in the other end of the +ruined building, now, but they did not notice it. + +"Tom," whispered Huckleberry, "does this keep us from EVER telling +--ALWAYS?" + +"Of course it does. It don't make any difference WHAT happens, we got +to keep mum. We'd drop down dead--don't YOU know that?" + +"Yes, I reckon that's so." + +They continued to whisper for some little time. Presently a dog set up +a long, lugubrious howl just outside--within ten feet of them. The boys +clasped each other suddenly, in an agony of fright. + +"Which of us does he mean?" gasped Huckleberry. + +"I dono--peep through the crack. Quick!" + +"No, YOU, Tom!" + +"I can't--I can't DO it, Huck!" + +"Please, Tom. There 'tis again!" + +"Oh, lordy, I'm thankful!" whispered Tom. "I know his voice. It's Bull +Harbison." * + +[* If Mr. Harbison owned a slave named Bull, Tom would have spoken of +him as "Harbison's Bull," but a son or a dog of that name was "Bull +Harbison."] + +"Oh, that's good--I tell you, Tom, I was most scared to death; I'd a +bet anything it was a STRAY dog." + +The dog howled again. The boys' hearts sank once more. + +"Oh, my! that ain't no Bull Harbison!" whispered Huckleberry. "DO, Tom!" + +Tom, quaking with fear, yielded, and put his eye to the crack. His +whisper was hardly audible when he said: + +"Oh, Huck, IT S A STRAY DOG!" + +"Quick, Tom, quick! Who does he mean?" + +"Huck, he must mean us both--we're right together." + +"Oh, Tom, I reckon we're goners. I reckon there ain't no mistake 'bout +where I'LL go to. I been so wicked." + +"Dad fetch it! This comes of playing hookey and doing everything a +feller's told NOT to do. I might a been good, like Sid, if I'd a tried +--but no, I wouldn't, of course. But if ever I get off this time, I lay +I'll just WALLER in Sunday-schools!" And Tom began to snuffle a little. + +"YOU bad!" and Huckleberry began to snuffle too. "Consound it, Tom +Sawyer, you're just old pie, 'longside o' what I am. Oh, LORDY, lordy, +lordy, I wisht I only had half your chance." + +Tom choked off and whispered: + +"Look, Hucky, look! He's got his BACK to us!" 
+ +Hucky looked, with joy in his heart. + +"Well, he has, by jingoes! Did he before?" + +"Yes, he did. But I, like a fool, never thought. Oh, this is bully, +you know. NOW who can he mean?" + +The howling stopped. Tom pricked up his ears. + +"Sh! What's that?" he whispered. + +"Sounds like--like hogs grunting. No--it's somebody snoring, Tom." + +"That IS it! Where 'bouts is it, Huck?" + +"I bleeve it's down at 'tother end. Sounds so, anyway. Pap used to +sleep there, sometimes, 'long with the hogs, but laws bless you, he +just lifts things when HE snores. Besides, I reckon he ain't ever +coming back to this town any more." + +The spirit of adventure rose in the boys' souls once more. + +"Hucky, do you das't to go if I lead?" + +"I don't like to, much. Tom, s'pose it's Injun Joe!" + +Tom quailed. But presently the temptation rose up strong again and the +boys agreed to try, with the understanding that they would take to +their heels if the snoring stopped. So they went tiptoeing stealthily +down, the one behind the other. When they had got to within five steps +of the snorer, Tom stepped on a stick, and it broke with a sharp snap. +The man moaned, writhed a little, and his face came into the moonlight. +It was Muff Potter. The boys' hearts had stood still, and their hopes +too, when the man moved, but their fears passed away now. They tiptoed +out, through the broken weather-boarding, and stopped at a little +distance to exchange a parting word. That long, lugubrious howl rose on +the night air again! They turned and saw the strange dog standing +within a few feet of where Potter was lying, and FACING Potter, with +his nose pointing heavenward. + +"Oh, geeminy, it's HIM!" exclaimed both boys, in a breath. + +"Say, Tom--they say a stray dog come howling around Johnny Miller's +house, 'bout midnight, as much as two weeks ago; and a whippoorwill +come in and lit on the banisters and sung, the very same evening; and +there ain't anybody dead there yet." + +"Well, I know that. And suppose there ain't. Didn't Gracie Miller fall +in the kitchen fire and burn herself terrible the very next Saturday?" + +"Yes, but she ain't DEAD. And what's more, she's getting better, too." + +"All right, you wait and see. She's a goner, just as dead sure as Muff +Potter's a goner. That's what the niggers say, and they know all about +these kind of things, Huck." + +Then they separated, cogitating. When Tom crept in at his bedroom +window the night was almost spent. He undressed with excessive caution, +and fell asleep congratulating himself that nobody knew of his +escapade. He was not aware that the gently-snoring Sid was awake, and +had been so for an hour. + +When Tom awoke, Sid was dressed and gone. There was a late look in the +light, a late sense in the atmosphere. He was startled. Why had he not +been called--persecuted till he was up, as usual? The thought filled +him with bodings. Within five minutes he was dressed and down-stairs, +feeling sore and drowsy. The family were still at table, but they had +finished breakfast. There was no voice of rebuke; but there were +averted eyes; there was a silence and an air of solemnity that struck a +chill to the culprit's heart. He sat down and tried to seem gay, but it +was up-hill work; it roused no smile, no response, and he lapsed into +silence and let his heart sink down to the depths. + +After breakfast his aunt took him aside, and Tom almost brightened in +the hope that he was going to be flogged; but it was not so. 
His aunt +wept over him and asked him how he could go and break her old heart so; +and finally told him to go on, and ruin himself and bring her gray +hairs with sorrow to the grave, for it was no use for her to try any +more. This was worse than a thousand whippings, and Tom's heart was +sorer now than his body. He cried, he pleaded for forgiveness, promised +to reform over and over again, and then received his dismissal, feeling +that he had won but an imperfect forgiveness and established but a +feeble confidence. + +He left the presence too miserable to even feel revengeful toward Sid; +and so the latter's prompt retreat through the back gate was +unnecessary. He moped to school gloomy and sad, and took his flogging, +along with Joe Harper, for playing hookey the day before, with the air +of one whose heart was busy with heavier woes and wholly dead to +trifles. Then he betook himself to his seat, rested his elbows on his +desk and his jaws in his hands, and stared at the wall with the stony +stare of suffering that has reached the limit and can no further go. +His elbow was pressing against some hard substance. After a long time +he slowly and sadly changed his position, and took up this object with +a sigh. It was in a paper. He unrolled it. A long, lingering, colossal +sigh followed, and his heart broke. It was his brass andiron knob! + +This final feather broke the camel's back. + + + +CHAPTER XI + +CLOSE upon the hour of noon the whole village was suddenly electrified +with the ghastly news. No need of the as yet undreamed-of telegraph; +the tale flew from man to man, from group to group, from house to +house, with little less than telegraphic speed. Of course the +schoolmaster gave holiday for that afternoon; the town would have +thought strangely of him if he had not. + +A gory knife had been found close to the murdered man, and it had been +recognized by somebody as belonging to Muff Potter--so the story ran. +And it was said that a belated citizen had come upon Potter washing +himself in the "branch" about one or two o'clock in the morning, and +that Potter had at once sneaked off--suspicious circumstances, +especially the washing which was not a habit with Potter. It was also +said that the town had been ransacked for this "murderer" (the public +are not slow in the matter of sifting evidence and arriving at a +verdict), but that he could not be found. Horsemen had departed down +all the roads in every direction, and the Sheriff "was confident" that +he would be captured before night. + +All the town was drifting toward the graveyard. Tom's heartbreak +vanished and he joined the procession, not because he would not a +thousand times rather go anywhere else, but because an awful, +unaccountable fascination drew him on. Arrived at the dreadful place, +he wormed his small body through the crowd and saw the dismal +spectacle. It seemed to him an age since he was there before. Somebody +pinched his arm. He turned, and his eyes met Huckleberry's. Then both +looked elsewhere at once, and wondered if anybody had noticed anything +in their mutual glance. But everybody was talking, and intent upon the +grisly spectacle before them. + +"Poor fellow!" "Poor young fellow!" "This ought to be a lesson to +grave robbers!" "Muff Potter'll hang for this if they catch him!" This +was the drift of remark; and the minister said, "It was a judgment; His +hand is here." + +Now Tom shivered from head to heel; for his eye fell upon the stolid +face of Injun Joe. 
At this moment the crowd began to sway and struggle, +and voices shouted, "It's him! it's him! he's coming himself!" + +"Who? Who?" from twenty voices. + +"Muff Potter!" + +"Hallo, he's stopped!--Look out, he's turning! Don't let him get away!" + +People in the branches of the trees over Tom's head said he wasn't +trying to get away--he only looked doubtful and perplexed. + +"Infernal impudence!" said a bystander; "wanted to come and take a +quiet look at his work, I reckon--didn't expect any company." + +The crowd fell apart, now, and the Sheriff came through, +ostentatiously leading Potter by the arm. The poor fellow's face was +haggard, and his eyes showed the fear that was upon him. When he stood +before the murdered man, he shook as with a palsy, and he put his face +in his hands and burst into tears. + +"I didn't do it, friends," he sobbed; "'pon my word and honor I never +done it." + +"Who's accused you?" shouted a voice. + +This shot seemed to carry home. Potter lifted his face and looked +around him with a pathetic hopelessness in his eyes. He saw Injun Joe, +and exclaimed: + +"Oh, Injun Joe, you promised me you'd never--" + +"Is that your knife?" and it was thrust before him by the Sheriff. + +Potter would have fallen if they had not caught him and eased him to +the ground. Then he said: + +"Something told me 't if I didn't come back and get--" He shuddered; +then waved his nerveless hand with a vanquished gesture and said, "Tell +'em, Joe, tell 'em--it ain't any use any more." + +Then Huckleberry and Tom stood dumb and staring, and heard the +stony-hearted liar reel off his serene statement, they expecting every +moment that the clear sky would deliver God's lightnings upon his head, +and wondering to see how long the stroke was delayed. And when he had +finished and still stood alive and whole, their wavering impulse to +break their oath and save the poor betrayed prisoner's life faded and +vanished away, for plainly this miscreant had sold himself to Satan and +it would be fatal to meddle with the property of such a power as that. + +"Why didn't you leave? What did you want to come here for?" somebody +said. + +"I couldn't help it--I couldn't help it," Potter moaned. "I wanted to +run away, but I couldn't seem to come anywhere but here." And he fell +to sobbing again. + +Injun Joe repeated his statement, just as calmly, a few minutes +afterward on the inquest, under oath; and the boys, seeing that the +lightnings were still withheld, were confirmed in their belief that Joe +had sold himself to the devil. He was now become, to them, the most +balefully interesting object they had ever looked upon, and they could +not take their fascinated eyes from his face. + +They inwardly resolved to watch him nights, when opportunity should +offer, in the hope of getting a glimpse of his dread master. + +Injun Joe helped to raise the body of the murdered man and put it in a +wagon for removal; and it was whispered through the shuddering crowd +that the wound bled a little! The boys thought that this happy +circumstance would turn suspicion in the right direction; but they were +disappointed, for more than one villager remarked: + +"It was within three feet of Muff Potter when it done it." + +Tom's fearful secret and gnawing conscience disturbed his sleep for as +much as a week after this; and at breakfast one morning Sid said: + +"Tom, you pitch around and talk in your sleep so much that you keep me +awake half the time." + +Tom blanched and dropped his eyes. 
+ +"It's a bad sign," said Aunt Polly, gravely. "What you got on your +mind, Tom?" + +"Nothing. Nothing 't I know of." But the boy's hand shook so that he +spilled his coffee. + +"And you do talk such stuff," Sid said. "Last night you said, 'It's +blood, it's blood, that's what it is!' You said that over and over. And +you said, 'Don't torment me so--I'll tell!' Tell WHAT? What is it +you'll tell?" + +Everything was swimming before Tom. There is no telling what might +have happened, now, but luckily the concern passed out of Aunt Polly's +face and she came to Tom's relief without knowing it. She said: + +"Sho! It's that dreadful murder. I dream about it most every night +myself. Sometimes I dream it's me that done it." + +Mary said she had been affected much the same way. Sid seemed +satisfied. Tom got out of the presence as quick as he plausibly could, +and after that he complained of toothache for a week, and tied up his +jaws every night. He never knew that Sid lay nightly watching, and +frequently slipped the bandage free and then leaned on his elbow +listening a good while at a time, and afterward slipped the bandage +back to its place again. Tom's distress of mind wore off gradually and +the toothache grew irksome and was discarded. If Sid really managed to +make anything out of Tom's disjointed mutterings, he kept it to himself. + +It seemed to Tom that his schoolmates never would get done holding +inquests on dead cats, and thus keeping his trouble present to his +mind. Sid noticed that Tom never was coroner at one of these inquiries, +though it had been his habit to take the lead in all new enterprises; +he noticed, too, that Tom never acted as a witness--and that was +strange; and Sid did not overlook the fact that Tom even showed a +marked aversion to these inquests, and always avoided them when he +could. Sid marvelled, but said nothing. However, even inquests went out +of vogue at last, and ceased to torture Tom's conscience. + +Every day or two, during this time of sorrow, Tom watched his +opportunity and went to the little grated jail-window and smuggled such +small comforts through to the "murderer" as he could get hold of. The +jail was a trifling little brick den that stood in a marsh at the edge +of the village, and no guards were afforded for it; indeed, it was +seldom occupied. These offerings greatly helped to ease Tom's +conscience. + +The villagers had a strong desire to tar-and-feather Injun Joe and +ride him on a rail, for body-snatching, but so formidable was his +character that nobody could be found who was willing to take the lead +in the matter, so it was dropped. He had been careful to begin both of +his inquest-statements with the fight, without confessing the +grave-robbery that preceded it; therefore it was deemed wisest not +to try the case in the courts at present. + + + +CHAPTER XII + +ONE of the reasons why Tom's mind had drifted away from its secret +troubles was, that it had found a new and weighty matter to interest +itself about. Becky Thatcher had stopped coming to school. Tom had +struggled with his pride a few days, and tried to "whistle her down the +wind," but failed. He began to find himself hanging around her father's +house, nights, and feeling very miserable. She was ill. What if she +should die! There was distraction in the thought. He no longer took an +interest in war, nor even in piracy. The charm of life was gone; there +was nothing but dreariness left. He put his hoop away, and his bat; +there was no joy in them any more. 
His aunt was concerned. She began to +try all manner of remedies on him. She was one of those people who are +infatuated with patent medicines and all new-fangled methods of +producing health or mending it. She was an inveterate experimenter in +these things. When something fresh in this line came out she was in a +fever, right away, to try it; not on herself, for she was never ailing, +but on anybody else that came handy. She was a subscriber for all the +"Health" periodicals and phrenological frauds; and the solemn ignorance +they were inflated with was breath to her nostrils. All the "rot" they +contained about ventilation, and how to go to bed, and how to get up, +and what to eat, and what to drink, and how much exercise to take, and +what frame of mind to keep one's self in, and what sort of clothing to +wear, was all gospel to her, and she never observed that her +health-journals of the current month customarily upset everything they +had recommended the month before. She was as simple-hearted and honest +as the day was long, and so she was an easy victim. She gathered +together her quack periodicals and her quack medicines, and thus armed +with death, went about on her pale horse, metaphorically speaking, with +"hell following after." But she never suspected that she was not an +angel of healing and the balm of Gilead in disguise, to the suffering +neighbors. + +The water treatment was new, now, and Tom's low condition was a +windfall to her. She had him out at daylight every morning, stood him +up in the woodshed and drowned him with a deluge of cold water; then +she scrubbed him down with a towel like a file, and so brought him to; +then she rolled him up in a wet sheet and put him away under blankets +till she sweated his soul clean and "the yellow stains of it came +through his pores"--as Tom said. + +Yet notwithstanding all this, the boy grew more and more melancholy +and pale and dejected. She added hot baths, sitz baths, shower baths, +and plunges. The boy remained as dismal as a hearse. She began to +assist the water with a slim oatmeal diet and blister-plasters. She +calculated his capacity as she would a jug's, and filled him up every +day with quack cure-alls. + +Tom had become indifferent to persecution by this time. This phase +filled the old lady's heart with consternation. This indifference must +be broken up at any cost. Now she heard of Pain-killer for the first +time. She ordered a lot at once. She tasted it and was filled with +gratitude. It was simply fire in a liquid form. She dropped the water +treatment and everything else, and pinned her faith to Pain-killer. She +gave Tom a teaspoonful and watched with the deepest anxiety for the +result. Her troubles were instantly at rest, her soul at peace again; +for the "indifference" was broken up. The boy could not have shown a +wilder, heartier interest, if she had built a fire under him. + +Tom felt that it was time to wake up; this sort of life might be +romantic enough, in his blighted condition, but it was getting to have +too little sentiment and too much distracting variety about it. So he +thought over various plans for relief, and finally hit pon that of +professing to be fond of Pain-killer. He asked for it so often that he +became a nuisance, and his aunt ended by telling him to help himself +and quit bothering her. If it had been Sid, she would have had no +misgivings to alloy her delight; but since it was Tom, she watched the +bottle clandestinely. 
She found that the medicine did really diminish, +but it did not occur to her that the boy was mending the health of a +crack in the sitting-room floor with it. + +One day Tom was in the act of dosing the crack when his aunt's yellow +cat came along, purring, eying the teaspoon avariciously, and begging +for a taste. Tom said: + +"Don't ask for it unless you want it, Peter." + +But Peter signified that he did want it. + +"You better make sure." + +Peter was sure. + +"Now you've asked for it, and I'll give it to you, because there ain't +anything mean about me; but if you find you don't like it, you mustn't +blame anybody but your own self." + +Peter was agreeable. So Tom pried his mouth open and poured down the +Pain-killer. Peter sprang a couple of yards in the air, and then +delivered a war-whoop and set off round and round the room, banging +against furniture, upsetting flower-pots, and making general havoc. +Next he rose on his hind feet and pranced around, in a frenzy of +enjoyment, with his head over his shoulder and his voice proclaiming +his unappeasable happiness. Then he went tearing around the house again +spreading chaos and destruction in his path. Aunt Polly entered in time +to see him throw a few double summersets, deliver a final mighty +hurrah, and sail through the open window, carrying the rest of the +flower-pots with him. The old lady stood petrified with astonishment, +peering over her glasses; Tom lay on the floor expiring with laughter. + +"Tom, what on earth ails that cat?" + +"I don't know, aunt," gasped the boy. + +"Why, I never see anything like it. What did make him act so?" + +"Deed I don't know, Aunt Polly; cats always act so when they're having +a good time." + +"They do, do they?" There was something in the tone that made Tom +apprehensive. + +"Yes'm. That is, I believe they do." + +"You DO?" + +"Yes'm." + +The old lady was bending down, Tom watching, with interest emphasized +by anxiety. Too late he divined her "drift." The handle of the telltale +teaspoon was visible under the bed-valance. Aunt Polly took it, held it +up. Tom winced, and dropped his eyes. Aunt Polly raised him by the +usual handle--his ear--and cracked his head soundly with her thimble. + +"Now, sir, what did you want to treat that poor dumb beast so, for?" + +"I done it out of pity for him--because he hadn't any aunt." + +"Hadn't any aunt!--you numskull. What has that got to do with it?" + +"Heaps. Because if he'd had one she'd a burnt him out herself! She'd a +roasted his bowels out of him 'thout any more feeling than if he was a +human!" + +Aunt Polly felt a sudden pang of remorse. This was putting the thing +in a new light; what was cruelty to a cat MIGHT be cruelty to a boy, +too. She began to soften; she felt sorry. Her eyes watered a little, +and she put her hand on Tom's head and said gently: + +"I was meaning for the best, Tom. And, Tom, it DID do you good." + +Tom looked up in her face with just a perceptible twinkle peeping +through his gravity. + +"I know you was meaning for the best, aunty, and so was I with Peter. +It done HIM good, too. I never see him get around so since--" + +"Oh, go 'long with you, Tom, before you aggravate me again. And you +try and see if you can't be a good boy, for once, and you needn't take +any more medicine." + +Tom reached school ahead of time. It was noticed that this strange +thing had been occurring every day latterly. And now, as usual of late, +he hung about the gate of the schoolyard instead of playing with his +comrades. 
He was sick, he said, and he looked it. He tried to seem to +be looking everywhere but whither he really was looking--down the road. +Presently Jeff Thatcher hove in sight, and Tom's face lighted; he gazed +a moment, and then turned sorrowfully away. When Jeff arrived, Tom +accosted him; and "led up" warily to opportunities for remark about +Becky, but the giddy lad never could see the bait. Tom watched and +watched, hoping whenever a frisking frock came in sight, and hating the +owner of it as soon as he saw she was not the right one. At last frocks +ceased to appear, and he dropped hopelessly into the dumps; he entered +the empty schoolhouse and sat down to suffer. Then one more frock +passed in at the gate, and Tom's heart gave a great bound. The next +instant he was out, and "going on" like an Indian; yelling, laughing, +chasing boys, jumping over the fence at risk of life and limb, throwing +handsprings, standing on his head--doing all the heroic things he could +conceive of, and keeping a furtive eye out, all the while, to see if +Becky Thatcher was noticing. But she seemed to be unconscious of it +all; she never looked. Could it be possible that she was not aware that +he was there? He carried his exploits to her immediate vicinity; came +war-whooping around, snatched a boy's cap, hurled it to the roof of the +schoolhouse, broke through a group of boys, tumbling them in every +direction, and fell sprawling, himself, under Becky's nose, almost +upsetting her--and she turned, with her nose in the air, and he heard +her say: "Mf! some people think they're mighty smart--always showing +off!" + +Tom's cheeks burned. He gathered himself up and sneaked off, crushed +and crestfallen. + + + +CHAPTER XIII + +TOM'S mind was made up now. He was gloomy and desperate. He was a +forsaken, friendless boy, he said; nobody loved him; when they found +out what they had driven him to, perhaps they would be sorry; he had +tried to do right and get along, but they would not let him; since +nothing would do them but to be rid of him, let it be so; and let them +blame HIM for the consequences--why shouldn't they? What right had the +friendless to complain? Yes, they had forced him to it at last: he +would lead a life of crime. There was no choice. + +By this time he was far down Meadow Lane, and the bell for school to +"take up" tinkled faintly upon his ear. He sobbed, now, to think he +should never, never hear that old familiar sound any more--it was very +hard, but it was forced on him; since he was driven out into the cold +world, he must submit--but he forgave them. Then the sobs came thick +and fast. + +Just at this point he met his soul's sworn comrade, Joe Harper +--hard-eyed, and with evidently a great and dismal purpose in his heart. +Plainly here were "two souls with but a single thought." Tom, wiping +his eyes with his sleeve, began to blubber out something about a +resolution to escape from hard usage and lack of sympathy at home by +roaming abroad into the great world never to return; and ended by +hoping that Joe would not forget him. + +But it transpired that this was a request which Joe had just been +going to make of Tom, and had come to hunt him up for that purpose. 
His +mother had whipped him for drinking some cream which he had never +tasted and knew nothing about; it was plain that she was tired of him +and wished him to go; if she felt that way, there was nothing for him +to do but succumb; he hoped she would be happy, and never regret having +driven her poor boy out into the unfeeling world to suffer and die. + +As the two boys walked sorrowing along, they made a new compact to +stand by each other and be brothers and never separate till death +relieved them of their troubles. Then they began to lay their plans. +Joe was for being a hermit, and living on crusts in a remote cave, and +dying, some time, of cold and want and grief; but after listening to +Tom, he conceded that there were some conspicuous advantages about a +life of crime, and so he consented to be a pirate. + +Three miles below St. Petersburg, at a point where the Mississippi +River was a trifle over a mile wide, there was a long, narrow, wooded +island, with a shallow bar at the head of it, and this offered well as +a rendezvous. It was not inhabited; it lay far over toward the further +shore, abreast a dense and almost wholly unpeopled forest. So Jackson's +Island was chosen. Who were to be the subjects of their piracies was a +matter that did not occur to them. Then they hunted up Huckleberry +Finn, and he joined them promptly, for all careers were one to him; he +was indifferent. They presently separated to meet at a lonely spot on +the river-bank two miles above the village at the favorite hour--which +was midnight. There was a small log raft there which they meant to +capture. Each would bring hooks and lines, and such provision as he +could steal in the most dark and mysterious way--as became outlaws. And +before the afternoon was done, they had all managed to enjoy the sweet +glory of spreading the fact that pretty soon the town would "hear +something." All who got this vague hint were cautioned to "be mum and +wait." + +About midnight Tom arrived with a boiled ham and a few trifles, +and stopped in a dense undergrowth on a small bluff overlooking the +meeting-place. It was starlight, and very still. The mighty river lay +like an ocean at rest. Tom listened a moment, but no sound disturbed the +quiet. Then he gave a low, distinct whistle. It was answered from under +the bluff. Tom whistled twice more; these signals were answered in the +same way. Then a guarded voice said: + +"Who goes there?" + +"Tom Sawyer, the Black Avenger of the Spanish Main. Name your names." + +"Huck Finn the Red-Handed, and Joe Harper the Terror of the Seas." Tom +had furnished these titles, from his favorite literature. + +"'Tis well. Give the countersign." + +Two hoarse whispers delivered the same awful word simultaneously to +the brooding night: + +"BLOOD!" + +Then Tom tumbled his ham over the bluff and let himself down after it, +tearing both skin and clothes to some extent in the effort. There was +an easy, comfortable path along the shore under the bluff, but it +lacked the advantages of difficulty and danger so valued by a pirate. + +The Terror of the Seas had brought a side of bacon, and had about worn +himself out with getting it there. Finn the Red-Handed had stolen a +skillet and a quantity of half-cured leaf tobacco, and had also brought +a few corn-cobs to make pipes with. But none of the pirates smoked or +"chewed" but himself. The Black Avenger of the Spanish Main said it +would never do to start without some fire. That was a wise thought; +matches were hardly known there in that day. 
They saw a fire +smouldering upon a great raft a hundred yards above, and they went +stealthily thither and helped themselves to a chunk. They made an +imposing adventure of it, saying, "Hist!" every now and then, and +suddenly halting with finger on lip; moving with hands on imaginary +dagger-hilts; and giving orders in dismal whispers that if "the foe" +stirred, to "let him have it to the hilt," because "dead men tell no +tales." They knew well enough that the raftsmen were all down at the +village laying in stores or having a spree, but still that was no +excuse for their conducting this thing in an unpiratical way. + +They shoved off, presently, Tom in command, Huck at the after oar and +Joe at the forward. Tom stood amidships, gloomy-browed, and with folded +arms, and gave his orders in a low, stern whisper: + +"Luff, and bring her to the wind!" + +"Aye-aye, sir!" + +"Steady, steady-y-y-y!" + +"Steady it is, sir!" + +"Let her go off a point!" + +"Point it is, sir!" + +As the boys steadily and monotonously drove the raft toward mid-stream +it was no doubt understood that these orders were given only for +"style," and were not intended to mean anything in particular. + +"What sail's she carrying?" + +"Courses, tops'ls, and flying-jib, sir." + +"Send the r'yals up! Lay out aloft, there, half a dozen of ye +--foretopmaststuns'l! Lively, now!" + +"Aye-aye, sir!" + +"Shake out that maintogalans'l! Sheets and braces! NOW my hearties!" + +"Aye-aye, sir!" + +"Hellum-a-lee--hard a port! Stand by to meet her when she comes! Port, +port! NOW, men! With a will! Stead-y-y-y!" + +"Steady it is, sir!" + +The raft drew beyond the middle of the river; the boys pointed her +head right, and then lay on their oars. The river was not high, so +there was not more than a two or three mile current. Hardly a word was +said during the next three-quarters of an hour. Now the raft was +passing before the distant town. Two or three glimmering lights showed +where it lay, peacefully sleeping, beyond the vague vast sweep of +star-gemmed water, unconscious of the tremendous event that was happening. +The Black Avenger stood still with folded arms, "looking his last" upon +the scene of his former joys and his later sufferings, and wishing +"she" could see him now, abroad on the wild sea, facing peril and death +with dauntless heart, going to his doom with a grim smile on his lips. +It was but a small strain on his imagination to remove Jackson's Island +beyond eyeshot of the village, and so he "looked his last" with a +broken and satisfied heart. The other pirates were looking their last, +too; and they all looked so long that they came near letting the +current drift them out of the range of the island. But they discovered +the danger in time, and made shift to avert it. About two o'clock in +the morning the raft grounded on the bar two hundred yards above the +head of the island, and they waded back and forth until they had landed +their freight. Part of the little raft's belongings consisted of an old +sail, and this they spread over a nook in the bushes for a tent to +shelter their provisions; but they themselves would sleep in the open +air in good weather, as became outlaws. + +They built a fire against the side of a great log twenty or thirty +steps within the sombre depths of the forest, and then cooked some +bacon in the frying-pan for supper, and used up half of the corn "pone" +stock they had brought. 
It seemed glorious sport to be feasting in that +wild, free way in the virgin forest of an unexplored and uninhabited +island, far from the haunts of men, and they said they never would +return to civilization. The climbing fire lit up their faces and threw +its ruddy glare upon the pillared tree-trunks of their forest temple, +and upon the varnished foliage and festooning vines. + +When the last crisp slice of bacon was gone, and the last allowance of +corn pone devoured, the boys stretched themselves out on the grass, +filled with contentment. They could have found a cooler place, but they +would not deny themselves such a romantic feature as the roasting +camp-fire. + +"AIN'T it gay?" said Joe. + +"It's NUTS!" said Tom. "What would the boys say if they could see us?" + +"Say? Well, they'd just die to be here--hey, Hucky!" + +"I reckon so," said Huckleberry; "anyways, I'm suited. I don't want +nothing better'n this. I don't ever get enough to eat, gen'ally--and +here they can't come and pick at a feller and bullyrag him so." + +"It's just the life for me," said Tom. "You don't have to get up, +mornings, and you don't have to go to school, and wash, and all that +blame foolishness. You see a pirate don't have to do ANYTHING, Joe, +when he's ashore, but a hermit HE has to be praying considerable, and +then he don't have any fun, anyway, all by himself that way." + +"Oh yes, that's so," said Joe, "but I hadn't thought much about it, +you know. I'd a good deal rather be a pirate, now that I've tried it." + +"You see," said Tom, "people don't go much on hermits, nowadays, like +they used to in old times, but a pirate's always respected. And a +hermit's got to sleep on the hardest place he can find, and put +sackcloth and ashes on his head, and stand out in the rain, and--" + +"What does he put sackcloth and ashes on his head for?" inquired Huck. + +"I dono. But they've GOT to do it. Hermits always do. You'd have to do +that if you was a hermit." + +"Dern'd if I would," said Huck. + +"Well, what would you do?" + +"I dono. But I wouldn't do that." + +"Why, Huck, you'd HAVE to. How'd you get around it?" + +"Why, I just wouldn't stand it. I'd run away." + +"Run away! Well, you WOULD be a nice old slouch of a hermit. You'd be +a disgrace." + +The Red-Handed made no response, being better employed. He had +finished gouging out a cob, and now he fitted a weed stem to it, loaded +it with tobacco, and was pressing a coal to the charge and blowing a +cloud of fragrant smoke--he was in the full bloom of luxurious +contentment. The other pirates envied him this majestic vice, and +secretly resolved to acquire it shortly. Presently Huck said: + +"What does pirates have to do?" + +Tom said: + +"Oh, they have just a bully time--take ships and burn them, and get +the money and bury it in awful places in their island where there's +ghosts and things to watch it, and kill everybody in the ships--make +'em walk a plank." + +"And they carry the women to the island," said Joe; "they don't kill +the women." + +"No," assented Tom, "they don't kill the women--they're too noble. And +the women's always beautiful, too. + +"And don't they wear the bulliest clothes! Oh no! All gold and silver +and di'monds," said Joe, with enthusiasm. + +"Who?" said Huck. + +"Why, the pirates." + +Huck scanned his own clothing forlornly. + +"I reckon I ain't dressed fitten for a pirate," said he, with a +regretful pathos in his voice; "but I ain't got none but these." 
+ +But the other boys told him the fine clothes would come fast enough, +after they should have begun their adventures. They made him understand +that his poor rags would do to begin with, though it was customary for +wealthy pirates to start with a proper wardrobe. + +Gradually their talk died out and drowsiness began to steal upon the +eyelids of the little waifs. The pipe dropped from the fingers of the +Red-Handed, and he slept the sleep of the conscience-free and the +weary. The Terror of the Seas and the Black Avenger of the Spanish Main +had more difficulty in getting to sleep. They said their prayers +inwardly, and lying down, since there was nobody there with authority +to make them kneel and recite aloud; in truth, they had a mind not to +say them at all, but they were afraid to proceed to such lengths as +that, lest they might call down a sudden and special thunderbolt from +heaven. Then at once they reached and hovered upon the imminent verge +of sleep--but an intruder came, now, that would not "down." It was +conscience. They began to feel a vague fear that they had been doing +wrong to run away; and next they thought of the stolen meat, and then +the real torture came. They tried to argue it away by reminding +conscience that they had purloined sweetmeats and apples scores of +times; but conscience was not to be appeased by such thin +plausibilities; it seemed to them, in the end, that there was no +getting around the stubborn fact that taking sweetmeats was only +"hooking," while taking bacon and hams and such valuables was plain +simple stealing--and there was a command against that in the Bible. So +they inwardly resolved that so long as they remained in the business, +their piracies should not again be sullied with the crime of stealing. +Then conscience granted a truce, and these curiously inconsistent +pirates fell peacefully to sleep. + + + +CHAPTER XIV + +WHEN Tom awoke in the morning, he wondered where he was. He sat up and +rubbed his eyes and looked around. Then he comprehended. It was the +cool gray dawn, and there was a delicious sense of repose and peace in +the deep pervading calm and silence of the woods. Not a leaf stirred; +not a sound obtruded upon great Nature's meditation. Beaded dewdrops +stood upon the leaves and grasses. A white layer of ashes covered the +fire, and a thin blue breath of smoke rose straight into the air. Joe +and Huck still slept. + +Now, far away in the woods a bird called; another answered; presently +the hammering of a woodpecker was heard. Gradually the cool dim gray of +the morning whitened, and as gradually sounds multiplied and life +manifested itself. The marvel of Nature shaking off sleep and going to +work unfolded itself to the musing boy. A little green worm came +crawling over a dewy leaf, lifting two-thirds of his body into the air +from time to time and "sniffing around," then proceeding again--for he +was measuring, Tom said; and when the worm approached him, of its own +accord, he sat as still as a stone, with his hopes rising and falling, +by turns, as the creature still came toward him or seemed inclined to +go elsewhere; and when at last it considered a painful moment with its +curved body in the air and then came decisively down upon Tom's leg and +began a journey over him, his whole heart was glad--for that meant that +he was going to have a new suit of clothes--without the shadow of a +doubt a gaudy piratical uniform. 
Now a procession of ants appeared, +from nowhere in particular, and went about their labors; one struggled +manfully by with a dead spider five times as big as itself in its arms, +and lugged it straight up a tree-trunk. A brown spotted lady-bug +climbed the dizzy height of a grass blade, and Tom bent down close to +it and said, "Lady-bug, lady-bug, fly away home, your house is on fire, +your children's alone," and she took wing and went off to see about it +--which did not surprise the boy, for he knew of old that this insect was +credulous about conflagrations, and he had practised upon its +simplicity more than once. A tumblebug came next, heaving sturdily at +its ball, and Tom touched the creature, to see it shut its legs against +its body and pretend to be dead. The birds were fairly rioting by this +time. A catbird, the Northern mocker, lit in a tree over Tom's head, +and trilled out her imitations of her neighbors in a rapture of +enjoyment; then a shrill jay swept down, a flash of blue flame, and +stopped on a twig almost within the boy's reach, cocked his head to one +side and eyed the strangers with a consuming curiosity; a gray squirrel +and a big fellow of the "fox" kind came skurrying along, sitting up at +intervals to inspect and chatter at the boys, for the wild things had +probably never seen a human being before and scarcely knew whether to +be afraid or not. All Nature was wide awake and stirring, now; long +lances of sunlight pierced down through the dense foliage far and near, +and a few butterflies came fluttering upon the scene. + +Tom stirred up the other pirates and they all clattered away with a +shout, and in a minute or two were stripped and chasing after and +tumbling over each other in the shallow limpid water of the white +sandbar. They felt no longing for the little village sleeping in the +distance beyond the majestic waste of water. A vagrant current or a +slight rise in the river had carried off their raft, but this only +gratified them, since its going was something like burning the bridge +between them and civilization. + +They came back to camp wonderfully refreshed, glad-hearted, and +ravenous; and they soon had the camp-fire blazing up again. Huck found +a spring of clear cold water close by, and the boys made cups of broad +oak or hickory leaves, and felt that water, sweetened with such a +wildwood charm as that, would be a good enough substitute for coffee. +While Joe was slicing bacon for breakfast, Tom and Huck asked him to +hold on a minute; they stepped to a promising nook in the river-bank +and threw in their lines; almost immediately they had reward. Joe had +not had time to get impatient before they were back again with some +handsome bass, a couple of sun-perch and a small catfish--provisions +enough for quite a family. They fried the fish with the bacon, and were +astonished; for no fish had ever seemed so delicious before. They did +not know that the quicker a fresh-water fish is on the fire after he is +caught the better he is; and they reflected little upon what a sauce +open-air sleeping, open-air exercise, bathing, and a large ingredient +of hunger make, too. + +They lay around in the shade, after breakfast, while Huck had a smoke, +and then went off through the woods on an exploring expedition. They +tramped gayly along, over decaying logs, through tangled underbrush, +among solemn monarchs of the forest, hung from their crowns to the +ground with a drooping regalia of grape-vines. 
Now and then they came +upon snug nooks carpeted with grass and jeweled with flowers. + +They found plenty of things to be delighted with, but nothing to be +astonished at. They discovered that the island was about three miles +long and a quarter of a mile wide, and that the shore it lay closest to +was only separated from it by a narrow channel hardly two hundred yards +wide. They took a swim about every hour, so it was close upon the +middle of the afternoon when they got back to camp. They were too +hungry to stop to fish, but they fared sumptuously upon cold ham, and +then threw themselves down in the shade to talk. But the talk soon +began to drag, and then died. The stillness, the solemnity that brooded +in the woods, and the sense of loneliness, began to tell upon the +spirits of the boys. They fell to thinking. A sort of undefined longing +crept upon them. This took dim shape, presently--it was budding +homesickness. Even Finn the Red-Handed was dreaming of his doorsteps +and empty hogsheads. But they were all ashamed of their weakness, and +none was brave enough to speak his thought. + +For some time, now, the boys had been dully conscious of a peculiar +sound in the distance, just as one sometimes is of the ticking of a +clock which he takes no distinct note of. But now this mysterious sound +became more pronounced, and forced a recognition. The boys started, +glanced at each other, and then each assumed a listening attitude. +There was a long silence, profound and unbroken; then a deep, sullen +boom came floating down out of the distance. + +"What is it!" exclaimed Joe, under his breath. + +"I wonder," said Tom in a whisper. + +"'Tain't thunder," said Huckleberry, in an awed tone, "becuz thunder--" + +"Hark!" said Tom. "Listen--don't talk." + +They waited a time that seemed an age, and then the same muffled boom +troubled the solemn hush. + +"Let's go and see." + +They sprang to their feet and hurried to the shore toward the town. +They parted the bushes on the bank and peered out over the water. The +little steam ferryboat was about a mile below the village, drifting +with the current. Her broad deck seemed crowded with people. There were +a great many skiffs rowing about or floating with the stream in the +neighborhood of the ferryboat, but the boys could not determine what +the men in them were doing. Presently a great jet of white smoke burst +from the ferryboat's side, and as it expanded and rose in a lazy cloud, +that same dull throb of sound was borne to the listeners again. + +"I know now!" exclaimed Tom; "somebody's drownded!" + +"That's it!" said Huck; "they done that last summer, when Bill Turner +got drownded; they shoot a cannon over the water, and that makes him +come up to the top. Yes, and they take loaves of bread and put +quicksilver in 'em and set 'em afloat, and wherever there's anybody +that's drownded, they'll float right there and stop." + +"Yes, I've heard about that," said Joe. "I wonder what makes the bread +do that." + +"Oh, it ain't the bread, so much," said Tom; "I reckon it's mostly +what they SAY over it before they start it out." + +"But they don't say anything over it," said Huck. "I've seen 'em and +they don't." + +"Well, that's funny," said Tom. "But maybe they say it to themselves. +Of COURSE they do. Anybody might know that." 
+ +The other boys agreed that there was reason in what Tom said, because +an ignorant lump of bread, uninstructed by an incantation, could not be +expected to act very intelligently when set upon an errand of such +gravity. + +"By jings, I wish I was over there, now," said Joe. + +"I do too" said Huck "I'd give heaps to know who it is." + +The boys still listened and watched. Presently a revealing thought +flashed through Tom's mind, and he exclaimed: + +"Boys, I know who's drownded--it's us!" + +They felt like heroes in an instant. Here was a gorgeous triumph; they +were missed; they were mourned; hearts were breaking on their account; +tears were being shed; accusing memories of unkindness to these poor +lost lads were rising up, and unavailing regrets and remorse were being +indulged; and best of all, the departed were the talk of the whole +town, and the envy of all the boys, as far as this dazzling notoriety +was concerned. This was fine. It was worth while to be a pirate, after +all. + +As twilight drew on, the ferryboat went back to her accustomed +business and the skiffs disappeared. The pirates returned to camp. They +were jubilant with vanity over their new grandeur and the illustrious +trouble they were making. They caught fish, cooked supper and ate it, +and then fell to guessing at what the village was thinking and saying +about them; and the pictures they drew of the public distress on their +account were gratifying to look upon--from their point of view. But +when the shadows of night closed them in, they gradually ceased to +talk, and sat gazing into the fire, with their minds evidently +wandering elsewhere. The excitement was gone, now, and Tom and Joe +could not keep back thoughts of certain persons at home who were not +enjoying this fine frolic as much as they were. Misgivings came; they +grew troubled and unhappy; a sigh or two escaped, unawares. By and by +Joe timidly ventured upon a roundabout "feeler" as to how the others +might look upon a return to civilization--not right now, but-- + +Tom withered him with derision! Huck, being uncommitted as yet, joined +in with Tom, and the waverer quickly "explained," and was glad to get +out of the scrape with as little taint of chicken-hearted homesickness +clinging to his garments as he could. Mutiny was effectually laid to +rest for the moment. + +As the night deepened, Huck began to nod, and presently to snore. Joe +followed next. Tom lay upon his elbow motionless, for some time, +watching the two intently. At last he got up cautiously, on his knees, +and went searching among the grass and the flickering reflections flung +by the camp-fire. He picked up and inspected several large +semi-cylinders of the thin white bark of a sycamore, and finally chose +two which seemed to suit him. Then he knelt by the fire and painfully +wrote something upon each of these with his "red keel"; one he rolled up +and put in his jacket pocket, and the other he put in Joe's hat and +removed it to a little distance from the owner. And he also put into the +hat certain schoolboy treasures of almost inestimable value--among them +a lump of chalk, an India-rubber ball, three fishhooks, and one of that +kind of marbles known as a "sure 'nough crystal." Then he tiptoed his +way cautiously among the trees till he felt that he was out of hearing, +and straightway broke into a keen run in the direction of the sandbar. + + + +CHAPTER XV + +A FEW minutes later Tom was in the shoal water of the bar, wading +toward the Illinois shore. 
Before the depth reached his middle he was +half-way over; the current would permit no more wading, now, so he +struck out confidently to swim the remaining hundred yards. He swam +quartering upstream, but still was swept downward rather faster than he +had expected. However, he reached the shore finally, and drifted along +till he found a low place and drew himself out. He put his hand on his +jacket pocket, found his piece of bark safe, and then struck through +the woods, following the shore, with streaming garments. Shortly before +ten o'clock he came out into an open place opposite the village, and +saw the ferryboat lying in the shadow of the trees and the high bank. +Everything was quiet under the blinking stars. He crept down the bank, +watching with all his eyes, slipped into the water, swam three or four +strokes and climbed into the skiff that did "yawl" duty at the boat's +stern. He laid himself down under the thwarts and waited, panting. + +Presently the cracked bell tapped and a voice gave the order to "cast +off." A minute or two later the skiff's head was standing high up, +against the boat's swell, and the voyage was begun. Tom felt happy in +his success, for he knew it was the boat's last trip for the night. At +the end of a long twelve or fifteen minutes the wheels stopped, and Tom +slipped overboard and swam ashore in the dusk, landing fifty yards +downstream, out of danger of possible stragglers. + +He flew along unfrequented alleys, and shortly found himself at his +aunt's back fence. He climbed over, approached the "ell," and looked in +at the sitting-room window, for a light was burning there. There sat +Aunt Polly, Sid, Mary, and Joe Harper's mother, grouped together, +talking. They were by the bed, and the bed was between them and the +door. Tom went to the door and began to softly lift the latch; then he +pressed gently and the door yielded a crack; he continued pushing +cautiously, and quaking every time it creaked, till he judged he might +squeeze through on his knees; so he put his head through and began, +warily. + +"What makes the candle blow so?" said Aunt Polly. Tom hurried up. +"Why, that door's open, I believe. Why, of course it is. No end of +strange things now. Go 'long and shut it, Sid." + +Tom disappeared under the bed just in time. He lay and "breathed" +himself for a time, and then crept to where he could almost touch his +aunt's foot. + +"But as I was saying," said Aunt Polly, "he warn't BAD, so to say +--only mischEEvous. Only just giddy, and harum-scarum, you know. He +warn't any more responsible than a colt. HE never meant any harm, and +he was the best-hearted boy that ever was"--and she began to cry. + +"It was just so with my Joe--always full of his devilment, and up to +every kind of mischief, but he was just as unselfish and kind as he +could be--and laws bless me, to think I went and whipped him for taking +that cream, never once recollecting that I throwed it out myself +because it was sour, and I never to see him again in this world, never, +never, never, poor abused boy!" And Mrs. Harper sobbed as if her heart +would break. + +"I hope Tom's better off where he is," said Sid, "but if he'd been +better in some ways--" + +"SID!" Tom felt the glare of the old lady's eye, though he could not +see it. "Not a word against my Tom, now that he's gone! God'll take +care of HIM--never you trouble YOURself, sir! Oh, Mrs. Harper, I don't +know how to give him up! I don't know how to give him up! 
He was such a +comfort to me, although he tormented my old heart out of me, 'most." + +"The Lord giveth and the Lord hath taken away--Blessed be the name of +the Lord! But it's so hard--Oh, it's so hard! Only last Saturday my +Joe busted a firecracker right under my nose and I knocked him +sprawling. Little did I know then, how soon--Oh, if it was to do over +again I'd hug him and bless him for it." + +"Yes, yes, yes, I know just how you feel, Mrs. Harper, I know just +exactly how you feel. No longer ago than yesterday noon, my Tom took +and filled the cat full of Pain-killer, and I did think the cretur +would tear the house down. And God forgive me, I cracked Tom's head +with my thimble, poor boy, poor dead boy. But he's out of all his +troubles now. And the last words I ever heard him say was to reproach--" + +But this memory was too much for the old lady, and she broke entirely +down. Tom was snuffling, now, himself--and more in pity of himself than +anybody else. He could hear Mary crying, and putting in a kindly word +for him from time to time. He began to have a nobler opinion of himself +than ever before. Still, he was sufficiently touched by his aunt's +grief to long to rush out from under the bed and overwhelm her with +joy--and the theatrical gorgeousness of the thing appealed strongly to +his nature, too, but he resisted and lay still. + +He went on listening, and gathered by odds and ends that it was +conjectured at first that the boys had got drowned while taking a swim; +then the small raft had been missed; next, certain boys said the +missing lads had promised that the village should "hear something" +soon; the wise-heads had "put this and that together" and decided that +the lads had gone off on that raft and would turn up at the next town +below, presently; but toward noon the raft had been found, lodged +against the Missouri shore some five or six miles below the village +--and then hope perished; they must be drowned, else hunger would have +driven them home by nightfall if not sooner. It was believed that the +search for the bodies had been a fruitless effort merely because the +drowning must have occurred in mid-channel, since the boys, being good +swimmers, would otherwise have escaped to shore. This was Wednesday +night. If the bodies continued missing until Sunday, all hope would be +given over, and the funerals would be preached on that morning. Tom +shuddered. + +Mrs. Harper gave a sobbing good-night and turned to go. Then with a +mutual impulse the two bereaved women flung themselves into each +other's arms and had a good, consoling cry, and then parted. Aunt Polly +was tender far beyond her wont, in her good-night to Sid and Mary. Sid +snuffled a bit and Mary went off crying with all her heart. + +Aunt Polly knelt down and prayed for Tom so touchingly, so +appealingly, and with such measureless love in her words and her old +trembling voice, that he was weltering in tears again, long before she +was through. + +He had to keep still long after she went to bed, for she kept making +broken-hearted ejaculations from time to time, tossing unrestfully, and +turning over. But at last she was still, only moaning a little in her +sleep. Now the boy stole out, rose gradually by the bedside, shaded the +candle-light with his hand, and stood regarding her. His heart was full +of pity for her. He took out his sycamore scroll and placed it by the +candle. But something occurred to him, and he lingered considering. 
His +face lighted with a happy solution of his thought; he put the bark +hastily in his pocket. Then he bent over and kissed the faded lips, and +straightway made his stealthy exit, latching the door behind him. + +He threaded his way back to the ferry landing, found nobody at large +there, and walked boldly on board the boat, for he knew she was +tenantless except that there was a watchman, who always turned in and +slept like a graven image. He untied the skiff at the stern, slipped +into it, and was soon rowing cautiously upstream. When he had pulled a +mile above the village, he started quartering across and bent himself +stoutly to his work. He hit the landing on the other side neatly, for +this was a familiar bit of work to him. He was moved to capture the +skiff, arguing that it might be considered a ship and therefore +legitimate prey for a pirate, but he knew a thorough search would be +made for it and that might end in revelations. So he stepped ashore and +entered the woods. + +He sat down and took a long rest, torturing himself meanwhile to keep +awake, and then started warily down the home-stretch. The night was far +spent. It was broad daylight before he found himself fairly abreast the +island bar. He rested again until the sun was well up and gilding the +great river with its splendor, and then he plunged into the stream. A +little later he paused, dripping, upon the threshold of the camp, and +heard Joe say: + +"No, Tom's true-blue, Huck, and he'll come back. He won't desert. He +knows that would be a disgrace to a pirate, and Tom's too proud for +that sort of thing. He's up to something or other. Now I wonder what?" + +"Well, the things is ours, anyway, ain't they?" + +"Pretty near, but not yet, Huck. The writing says they are if he ain't +back here to breakfast." + +"Which he is!" exclaimed Tom, with fine dramatic effect, stepping +grandly into camp. + +A sumptuous breakfast of bacon and fish was shortly provided, and as +the boys set to work upon it, Tom recounted (and adorned) his +adventures. They were a vain and boastful company of heroes when the +tale was done. Then Tom hid himself away in a shady nook to sleep till +noon, and the other pirates got ready to fish and explore. + + + +CHAPTER XVI + +AFTER dinner all the gang turned out to hunt for turtle eggs on the +bar. They went about poking sticks into the sand, and when they found a +soft place they went down on their knees and dug with their hands. +Sometimes they would take fifty or sixty eggs out of one hole. They +were perfectly round white things a trifle smaller than an English +walnut. They had a famous fried-egg feast that night, and another on +Friday morning. + +After breakfast they went whooping and prancing out on the bar, and +chased each other round and round, shedding clothes as they went, until +they were naked, and then continued the frolic far away up the shoal +water of the bar, against the stiff current, which latter tripped their +legs from under them from time to time and greatly increased the fun. +And now and then they stooped in a group and splashed water in each +other's faces with their palms, gradually approaching each other, with +averted faces to avoid the strangling sprays, and finally gripping and +struggling till the best man ducked his neighbor, and then they all +went under in a tangle of white legs and arms and came up blowing, +sputtering, laughing, and gasping for breath at one and the same time. 
+ +When they were well exhausted, they would run out and sprawl on the +dry, hot sand, and lie there and cover themselves up with it, and by +and by break for the water again and go through the original +performance once more. Finally it occurred to them that their naked +skin represented flesh-colored "tights" very fairly; so they drew a +ring in the sand and had a circus--with three clowns in it, for none +would yield this proudest post to his neighbor. + +Next they got their marbles and played "knucks" and "ring-taw" and +"keeps" till that amusement grew stale. Then Joe and Huck had another +swim, but Tom would not venture, because he found that in kicking off +his trousers he had kicked his string of rattlesnake rattles off his +ankle, and he wondered how he had escaped cramp so long without the +protection of this mysterious charm. He did not venture again until he +had found it, and by that time the other boys were tired and ready to +rest. They gradually wandered apart, dropped into the "dumps," and fell +to gazing longingly across the wide river to where the village lay +drowsing in the sun. Tom found himself writing "BECKY" in the sand with +his big toe; he scratched it out, and was angry with himself for his +weakness. But he wrote it again, nevertheless; he could not help it. He +erased it once more and then took himself out of temptation by driving +the other boys together and joining them. + +But Joe's spirits had gone down almost beyond resurrection. He was so +homesick that he could hardly endure the misery of it. The tears lay +very near the surface. Huck was melancholy, too. Tom was downhearted, +but tried hard not to show it. He had a secret which he was not ready +to tell, yet, but if this mutinous depression was not broken up soon, +he would have to bring it out. He said, with a great show of +cheerfulness: + +"I bet there's been pirates on this island before, boys. We'll explore +it again. They've hid treasures here somewhere. How'd you feel to light +on a rotten chest full of gold and silver--hey?" + +But it roused only faint enthusiasm, which faded out, with no reply. +Tom tried one or two other seductions; but they failed, too. It was +discouraging work. Joe sat poking up the sand with a stick and looking +very gloomy. Finally he said: + +"Oh, boys, let's give it up. I want to go home. It's so lonesome." + +"Oh no, Joe, you'll feel better by and by," said Tom. "Just think of +the fishing that's here." + +"I don't care for fishing. I want to go home." + +"But, Joe, there ain't such another swimming-place anywhere." + +"Swimming's no good. I don't seem to care for it, somehow, when there +ain't anybody to say I sha'n't go in. I mean to go home." + +"Oh, shucks! Baby! You want to see your mother, I reckon." + +"Yes, I DO want to see my mother--and you would, too, if you had one. +I ain't any more baby than you are." And Joe snuffled a little. + +"Well, we'll let the cry-baby go home to his mother, won't we, Huck? +Poor thing--does it want to see its mother? And so it shall. You like +it here, don't you, Huck? We'll stay, won't we?" + +Huck said, "Y-e-s"--without any heart in it. + +"I'll never speak to you again as long as I live," said Joe, rising. +"There now!" And he moved moodily away and began to dress himself. + +"Who cares!" said Tom. "Nobody wants you to. Go 'long home and get +laughed at. Oh, you're a nice pirate. Huck and me ain't cry-babies. +We'll stay, won't we, Huck? Let him go if he wants to. I reckon we can +get along without him, per'aps." 
+ +But Tom was uneasy, nevertheless, and was alarmed to see Joe go +sullenly on with his dressing. And then it was discomforting to see +Huck eying Joe's preparations so wistfully, and keeping up such an +ominous silence. Presently, without a parting word, Joe began to wade +off toward the Illinois shore. Tom's heart began to sink. He glanced at +Huck. Huck could not bear the look, and dropped his eyes. Then he said: + +"I want to go, too, Tom. It was getting so lonesome anyway, and now +it'll be worse. Let's us go, too, Tom." + +"I won't! You can all go, if you want to. I mean to stay." + +"Tom, I better go." + +"Well, go 'long--who's hendering you." + +Huck began to pick up his scattered clothes. He said: + +"Tom, I wisht you'd come, too. Now you think it over. We'll wait for +you when we get to shore." + +"Well, you'll wait a blame long time, that's all." + +Huck started sorrowfully away, and Tom stood looking after him, with a +strong desire tugging at his heart to yield his pride and go along too. +He hoped the boys would stop, but they still waded slowly on. It +suddenly dawned on Tom that it was become very lonely and still. He +made one final struggle with his pride, and then darted after his +comrades, yelling: + +"Wait! Wait! I want to tell you something!" + +They presently stopped and turned around. When he got to where they +were, he began unfolding his secret, and they listened moodily till at +last they saw the "point" he was driving at, and then they set up a +war-whoop of applause and said it was "splendid!" and said if he had +told them at first, they wouldn't have started away. He made a plausible +excuse; but his real reason had been the fear that not even the secret +would keep them with him any very great length of time, and so he had +meant to hold it in reserve as a last seduction. + +The lads came gayly back and went at their sports again with a will, +chattering all the time about Tom's stupendous plan and admiring the +genius of it. After a dainty egg and fish dinner, Tom said he wanted to +learn to smoke, now. Joe caught at the idea and said he would like to +try, too. So Huck made pipes and filled them. These novices had never +smoked anything before but cigars made of grape-vine, and they "bit" +the tongue, and were not considered manly anyway. + +Now they stretched themselves out on their elbows and began to puff, +charily, and with slender confidence. The smoke had an unpleasant +taste, and they gagged a little, but Tom said: + +"Why, it's just as easy! If I'd a knowed this was all, I'd a learnt +long ago." + +"So would I," said Joe. "It's just nothing." + +"Why, many a time I've looked at people smoking, and thought well I +wish I could do that; but I never thought I could," said Tom. + +"That's just the way with me, hain't it, Huck? You've heard me talk +just that way--haven't you, Huck? I'll leave it to Huck if I haven't." + +"Yes--heaps of times," said Huck. + +"Well, I have too," said Tom; "oh, hundreds of times. Once down by the +slaughter-house. Don't you remember, Huck? Bob Tanner was there, and +Johnny Miller, and Jeff Thatcher, when I said it. Don't you remember, +Huck, 'bout me saying that?" + +"Yes, that's so," said Huck. "That was the day after I lost a white +alley. No, 'twas the day before." + +"There--I told you so," said Tom. "Huck recollects it." + +"I bleeve I could smoke this pipe all day," said Joe. "I don't feel +sick." + +"Neither do I," said Tom. "I could smoke it all day. But I bet you +Jeff Thatcher couldn't." + +"Jeff Thatcher! 
Why, he'd keel over just with two draws. Just let him +try it once. HE'D see!" + +"I bet he would. And Johnny Miller--I wish could see Johnny Miller +tackle it once." + +"Oh, don't I!" said Joe. "Why, I bet you Johnny Miller couldn't any +more do this than nothing. Just one little snifter would fetch HIM." + +"'Deed it would, Joe. Say--I wish the boys could see us now." + +"So do I." + +"Say--boys, don't say anything about it, and some time when they're +around, I'll come up to you and say, 'Joe, got a pipe? I want a smoke.' +And you'll say, kind of careless like, as if it warn't anything, you'll +say, 'Yes, I got my OLD pipe, and another one, but my tobacker ain't +very good.' And I'll say, 'Oh, that's all right, if it's STRONG +enough.' And then you'll out with the pipes, and we'll light up just as +ca'm, and then just see 'em look!" + +"By jings, that'll be gay, Tom! I wish it was NOW!" + +"So do I! And when we tell 'em we learned when we was off pirating, +won't they wish they'd been along?" + +"Oh, I reckon not! I'll just BET they will!" + +So the talk ran on. But presently it began to flag a trifle, and grow +disjointed. The silences widened; the expectoration marvellously +increased. Every pore inside the boys' cheeks became a spouting +fountain; they could scarcely bail out the cellars under their tongues +fast enough to prevent an inundation; little overflowings down their +throats occurred in spite of all they could do, and sudden retchings +followed every time. Both boys were looking very pale and miserable, +now. Joe's pipe dropped from his nerveless fingers. Tom's followed. +Both fountains were going furiously and both pumps bailing with might +and main. Joe said feebly: + +"I've lost my knife. I reckon I better go and find it." + +Tom said, with quivering lips and halting utterance: + +"I'll help you. You go over that way and I'll hunt around by the +spring. No, you needn't come, Huck--we can find it." + +So Huck sat down again, and waited an hour. Then he found it lonesome, +and went to find his comrades. They were wide apart in the woods, both +very pale, both fast asleep. But something informed him that if they +had had any trouble they had got rid of it. + +They were not talkative at supper that night. They had a humble look, +and when Huck prepared his pipe after the meal and was going to prepare +theirs, they said no, they were not feeling very well--something they +ate at dinner had disagreed with them. + +About midnight Joe awoke, and called the boys. There was a brooding +oppressiveness in the air that seemed to bode something. The boys +huddled themselves together and sought the friendly companionship of +the fire, though the dull dead heat of the breathless atmosphere was +stifling. They sat still, intent and waiting. The solemn hush +continued. Beyond the light of the fire everything was swallowed up in +the blackness of darkness. Presently there came a quivering glow that +vaguely revealed the foliage for a moment and then vanished. By and by +another came, a little stronger. Then another. Then a faint moan came +sighing through the branches of the forest and the boys felt a fleeting +breath upon their cheeks, and shuddered with the fancy that the Spirit +of the Night had gone by. There was a pause. Now a weird flash turned +night into day and showed every little grass-blade, separate and +distinct, that grew about their feet. And it showed three white, +startled faces, too. 
A deep peal of thunder went rolling and tumbling +down the heavens and lost itself in sullen rumblings in the distance. A +sweep of chilly air passed by, rustling all the leaves and snowing the +flaky ashes broadcast about the fire. Another fierce glare lit up the +forest and an instant crash followed that seemed to rend the tree-tops +right over the boys' heads. They clung together in terror, in the thick +gloom that followed. A few big rain-drops fell pattering upon the +leaves. + +"Quick! boys, go for the tent!" exclaimed Tom. + +They sprang away, stumbling over roots and among vines in the dark, no +two plunging in the same direction. A furious blast roared through the +trees, making everything sing as it went. One blinding flash after +another came, and peal on peal of deafening thunder. And now a +drenching rain poured down and the rising hurricane drove it in sheets +along the ground. The boys cried out to each other, but the roaring +wind and the booming thunder-blasts drowned their voices utterly. +However, one by one they straggled in at last and took shelter under +the tent, cold, scared, and streaming with water; but to have company +in misery seemed something to be grateful for. They could not talk, the +old sail flapped so furiously, even if the other noises would have +allowed them. The tempest rose higher and higher, and presently the +sail tore loose from its fastenings and went winging away on the blast. +The boys seized each others' hands and fled, with many tumblings and +bruises, to the shelter of a great oak that stood upon the river-bank. +Now the battle was at its highest. Under the ceaseless conflagration of +lightning that flamed in the skies, everything below stood out in +clean-cut and shadowless distinctness: the bending trees, the billowy +river, white with foam, the driving spray of spume-flakes, the dim +outlines of the high bluffs on the other side, glimpsed through the +drifting cloud-rack and the slanting veil of rain. Every little while +some giant tree yielded the fight and fell crashing through the younger +growth; and the unflagging thunder-peals came now in ear-splitting +explosive bursts, keen and sharp, and unspeakably appalling. The storm +culminated in one matchless effort that seemed likely to tear the island +to pieces, burn it up, drown it to the tree-tops, blow it away, and +deafen every creature in it, all at one and the same moment. It was a +wild night for homeless young heads to be out in. + +But at last the battle was done, and the forces retired with weaker +and weaker threatenings and grumblings, and peace resumed her sway. The +boys went back to camp, a good deal awed; but they found there was +still something to be thankful for, because the great sycamore, the +shelter of their beds, was a ruin, now, blasted by the lightnings, and +they were not under it when the catastrophe happened. + +Everything in camp was drenched, the camp-fire as well; for they were +but heedless lads, like their generation, and had made no provision +against rain. Here was matter for dismay, for they were soaked through +and chilled. They were eloquent in their distress; but they presently +discovered that the fire had eaten so far up under the great log it had +been built against (where it curved upward and separated itself from +the ground), that a handbreadth or so of it had escaped wetting; so +they patiently wrought until, with shreds and bark gathered from the +under sides of sheltered logs, they coaxed the fire to burn again. 
Then +they piled on great dead boughs till they had a roaring furnace, and +were glad-hearted once more. They dried their boiled ham and had a +feast, and after that they sat by the fire and expanded and glorified +their midnight adventure until morning, for there was not a dry spot to +sleep on, anywhere around. + +As the sun began to steal in upon the boys, drowsiness came over them, +and they went out on the sandbar and lay down to sleep. They got +scorched out by and by, and drearily set about getting breakfast. After +the meal they felt rusty, and stiff-jointed, and a little homesick once +more. Tom saw the signs, and fell to cheering up the pirates as well as +he could. But they cared nothing for marbles, or circus, or swimming, +or anything. He reminded them of the imposing secret, and raised a ray +of cheer. While it lasted, he got them interested in a new device. This +was to knock off being pirates, for a while, and be Indians for a +change. They were attracted by this idea; so it was not long before +they were stripped, and striped from head to heel with black mud, like +so many zebras--all of them chiefs, of course--and then they went +tearing through the woods to attack an English settlement. + +By and by they separated into three hostile tribes, and darted upon +each other from ambush with dreadful war-whoops, and killed and scalped +each other by thousands. It was a gory day. Consequently it was an +extremely satisfactory one. + +They assembled in camp toward supper-time, hungry and happy; but now a +difficulty arose--hostile Indians could not break the bread of +hospitality together without first making peace, and this was a simple +impossibility without smoking a pipe of peace. There was no other +process that ever they had heard of. Two of the savages almost wished +they had remained pirates. However, there was no other way; so with +such show of cheerfulness as they could muster they called for the pipe +and took their whiff as it passed, in due form. + +And behold, they were glad they had gone into savagery, for they had +gained something; they found that they could now smoke a little without +having to go and hunt for a lost knife; they did not get sick enough to +be seriously uncomfortable. They were not likely to fool away this high +promise for lack of effort. No, they practised cautiously, after +supper, with right fair success, and so they spent a jubilant evening. +They were prouder and happier in their new acquirement than they would +have been in the scalping and skinning of the Six Nations. We will +leave them to smoke and chatter and brag, since we have no further use +for them at present. + + + +CHAPTER XVII + +BUT there was no hilarity in the little town that same tranquil +Saturday afternoon. The Harpers, and Aunt Polly's family, were being +put into mourning, with great grief and many tears. An unusual quiet +possessed the village, although it was ordinarily quiet enough, in all +conscience. The villagers conducted their concerns with an absent air, +and talked little; but they sighed often. The Saturday holiday seemed a +burden to the children. They had no heart in their sports, and +gradually gave them up. + +In the afternoon Becky Thatcher found herself moping about the +deserted schoolhouse yard, and feeling very melancholy. But she found +nothing there to comfort her. She soliloquized: + +"Oh, if I only had a brass andiron-knob again! But I haven't got +anything now to remember him by." And she choked back a little sob. 
+ +Presently she stopped, and said to herself: + +"It was right here. Oh, if it was to do over again, I wouldn't say +that--I wouldn't say it for the whole world. But he's gone now; I'll +never, never, never see him any more." + +This thought broke her down, and she wandered away, with tears rolling +down her cheeks. Then quite a group of boys and girls--playmates of +Tom's and Joe's--came by, and stood looking over the paling fence and +talking in reverent tones of how Tom did so-and-so the last time they +saw him, and how Joe said this and that small trifle (pregnant with +awful prophecy, as they could easily see now!)--and each speaker +pointed out the exact spot where the lost lads stood at the time, and +then added something like "and I was a-standing just so--just as I am +now, and as if you was him--I was as close as that--and he smiled, just +this way--and then something seemed to go all over me, like--awful, you +know--and I never thought what it meant, of course, but I can see now!" + +Then there was a dispute about who saw the dead boys last in life, and +many claimed that dismal distinction, and offered evidences, more or +less tampered with by the witness; and when it was ultimately decided +who DID see the departed last, and exchanged the last words with them, +the lucky parties took upon themselves a sort of sacred importance, and +were gaped at and envied by all the rest. One poor chap, who had no +other grandeur to offer, said with tolerably manifest pride in the +remembrance: + +"Well, Tom Sawyer he licked me once." + +But that bid for glory was a failure. Most of the boys could say that, +and so that cheapened the distinction too much. The group loitered +away, still recalling memories of the lost heroes, in awed voices. + +When the Sunday-school hour was finished, the next morning, the bell +began to toll, instead of ringing in the usual way. It was a very still +Sabbath, and the mournful sound seemed in keeping with the musing hush +that lay upon nature. The villagers began to gather, loitering a moment +in the vestibule to converse in whispers about the sad event. But there +was no whispering in the house; only the funereal rustling of dresses +as the women gathered to their seats disturbed the silence there. None +could remember when the little church had been so full before. There +was finally a waiting pause, an expectant dumbness, and then Aunt Polly +entered, followed by Sid and Mary, and they by the Harper family, all +in deep black, and the whole congregation, the old minister as well, +rose reverently and stood until the mourners were seated in the front +pew. There was another communing silence, broken at intervals by +muffled sobs, and then the minister spread his hands abroad and prayed. +A moving hymn was sung, and the text followed: "I am the Resurrection +and the Life." + +As the service proceeded, the clergyman drew such pictures of the +graces, the winning ways, and the rare promise of the lost lads that +every soul there, thinking he recognized these pictures, felt a pang in +remembering that he had persistently blinded himself to them always +before, and had as persistently seen only faults and flaws in the poor +boys. 
The minister related many a touching incident in the lives of the +departed, too, which illustrated their sweet, generous natures, and the +people could easily see, now, how noble and beautiful those episodes +were, and remembered with grief that at the time they occurred they had +seemed rank rascalities, well deserving of the cowhide. The +congregation became more and more moved, as the pathetic tale went on, +till at last the whole company broke down and joined the weeping +mourners in a chorus of anguished sobs, the preacher himself giving way +to his feelings, and crying in the pulpit. + +There was a rustle in the gallery, which nobody noticed; a moment +later the church door creaked; the minister raised his streaming eyes +above his handkerchief, and stood transfixed! First one and then +another pair of eyes followed the minister's, and then almost with one +impulse the congregation rose and stared while the three dead boys came +marching up the aisle, Tom in the lead, Joe next, and Huck, a ruin of +drooping rags, sneaking sheepishly in the rear! They had been hid in +the unused gallery listening to their own funeral sermon! + +Aunt Polly, Mary, and the Harpers threw themselves upon their restored +ones, smothered them with kisses and poured out thanksgivings, while +poor Huck stood abashed and uncomfortable, not knowing exactly what to +do or where to hide from so many unwelcoming eyes. He wavered, and +started to slink away, but Tom seized him and said: + +"Aunt Polly, it ain't fair. Somebody's got to be glad to see Huck." + +"And so they shall. I'm glad to see him, poor motherless thing!" And +the loving attentions Aunt Polly lavished upon him were the one thing +capable of making him more uncomfortable than he was before. + +Suddenly the minister shouted at the top of his voice: "Praise God +from whom all blessings flow--SING!--and put your hearts in it!" + +And they did. Old Hundred swelled up with a triumphant burst, and +while it shook the rafters Tom Sawyer the Pirate looked around upon the +envying juveniles about him and confessed in his heart that this was +the proudest moment of his life. + +As the "sold" congregation trooped out they said they would almost be +willing to be made ridiculous again to hear Old Hundred sung like that +once more. + +Tom got more cuffs and kisses that day--according to Aunt Polly's +varying moods--than he had earned before in a year; and he hardly knew +which expressed the most gratefulness to God and affection for himself. + + + +CHAPTER XVIII + +THAT was Tom's great secret--the scheme to return home with his +brother pirates and attend their own funerals. They had paddled over to +the Missouri shore on a log, at dusk on Saturday, landing five or six +miles below the village; they had slept in the woods at the edge of the +town till nearly daylight, and had then crept through back lanes and +alleys and finished their sleep in the gallery of the church among a +chaos of invalided benches. + +At breakfast, Monday morning, Aunt Polly and Mary were very loving to +Tom, and very attentive to his wants. There was an unusual amount of +talk. In the course of it Aunt Polly said: + +"Well, I don't say it wasn't a fine joke, Tom, to keep everybody +suffering 'most a week so you boys had a good time, but it is a pity +you could be so hard-hearted as to let me suffer so. If you could come +over on a log to go to your funeral, you could have come over and give +me a hint some way that you warn't dead, but only run off." 
+ +"Yes, you could have done that, Tom," said Mary; "and I believe you +would if you had thought of it." + +"Would you, Tom?" said Aunt Polly, her face lighting wistfully. "Say, +now, would you, if you'd thought of it?" + +"I--well, I don't know. 'Twould 'a' spoiled everything." + +"Tom, I hoped you loved me that much," said Aunt Polly, with a grieved +tone that discomforted the boy. "It would have been something if you'd +cared enough to THINK of it, even if you didn't DO it." + +"Now, auntie, that ain't any harm," pleaded Mary; "it's only Tom's +giddy way--he is always in such a rush that he never thinks of +anything." + +"More's the pity. Sid would have thought. And Sid would have come and +DONE it, too. Tom, you'll look back, some day, when it's too late, and +wish you'd cared a little more for me when it would have cost you so +little." + +"Now, auntie, you know I do care for you," said Tom. + +"I'd know it better if you acted more like it." + +"I wish now I'd thought," said Tom, with a repentant tone; "but I +dreamt about you, anyway. That's something, ain't it?" + +"It ain't much--a cat does that much--but it's better than nothing. +What did you dream?" + +"Why, Wednesday night I dreamt that you was sitting over there by the +bed, and Sid was sitting by the woodbox, and Mary next to him." + +"Well, so we did. So we always do. I'm glad your dreams could take +even that much trouble about us." + +"And I dreamt that Joe Harper's mother was here." + +"Why, she was here! Did you dream any more?" + +"Oh, lots. But it's so dim, now." + +"Well, try to recollect--can't you?" + +"Somehow it seems to me that the wind--the wind blowed the--the--" + +"Try harder, Tom! The wind did blow something. Come!" + +Tom pressed his fingers on his forehead an anxious minute, and then +said: + +"I've got it now! I've got it now! It blowed the candle!" + +"Mercy on us! Go on, Tom--go on!" + +"And it seems to me that you said, 'Why, I believe that that door--'" + +"Go ON, Tom!" + +"Just let me study a moment--just a moment. Oh, yes--you said you +believed the door was open." + +"As I'm sitting here, I did! Didn't I, Mary! Go on!" + +"And then--and then--well I won't be certain, but it seems like as if +you made Sid go and--and--" + +"Well? Well? What did I make him do, Tom? What did I make him do?" + +"You made him--you--Oh, you made him shut it." + +"Well, for the land's sake! I never heard the beat of that in all my +days! Don't tell ME there ain't anything in dreams, any more. Sereny +Harper shall know of this before I'm an hour older. I'd like to see her +get around THIS with her rubbage 'bout superstition. Go on, Tom!" + +"Oh, it's all getting just as bright as day, now. Next you said I +warn't BAD, only mischeevous and harum-scarum, and not any more +responsible than--than--I think it was a colt, or something." + +"And so it was! Well, goodness gracious! Go on, Tom!" + +"And then you began to cry." + +"So I did. So I did. Not the first time, neither. And then--" + +"Then Mrs. Harper she began to cry, and said Joe was just the same, +and she wished she hadn't whipped him for taking cream when she'd +throwed it out her own self--" + +"Tom! The sperrit was upon you! You was a prophesying--that's what you +was doing! Land alive, go on, Tom!" + +"Then Sid he said--he said--" + +"I don't think I said anything," said Sid. + +"Yes you did, Sid," said Mary. + +"Shut your heads and let Tom go on! What did he say, Tom?" 
+ +"He said--I THINK he said he hoped I was better off where I was gone +to, but if I'd been better sometimes--" + +"THERE, d'you hear that! It was his very words!" + +"And you shut him up sharp." + +"I lay I did! There must 'a' been an angel there. There WAS an angel +there, somewheres!" + +"And Mrs. Harper told about Joe scaring her with a firecracker, and +you told about Peter and the Painkiller--" + +"Just as true as I live!" + +"And then there was a whole lot of talk 'bout dragging the river for +us, and 'bout having the funeral Sunday, and then you and old Miss +Harper hugged and cried, and she went." + +"It happened just so! It happened just so, as sure as I'm a-sitting in +these very tracks. Tom, you couldn't told it more like if you'd 'a' +seen it! And then what? Go on, Tom!" + +"Then I thought you prayed for me--and I could see you and hear every +word you said. And you went to bed, and I was so sorry that I took and +wrote on a piece of sycamore bark, 'We ain't dead--we are only off +being pirates,' and put it on the table by the candle; and then you +looked so good, laying there asleep, that I thought I went and leaned +over and kissed you on the lips." + +"Did you, Tom, DID you! I just forgive you everything for that!" And +she seized the boy in a crushing embrace that made him feel like the +guiltiest of villains. + +"It was very kind, even though it was only a--dream," Sid soliloquized +just audibly. + +"Shut up, Sid! A body does just the same in a dream as he'd do if he +was awake. Here's a big Milum apple I've been saving for you, Tom, if +you was ever found again--now go 'long to school. I'm thankful to the +good God and Father of us all I've got you back, that's long-suffering +and merciful to them that believe on Him and keep His word, though +goodness knows I'm unworthy of it, but if only the worthy ones got His +blessings and had His hand to help them over the rough places, there's +few enough would smile here or ever enter into His rest when the long +night comes. Go 'long Sid, Mary, Tom--take yourselves off--you've +hendered me long enough." + +The children left for school, and the old lady to call on Mrs. Harper +and vanquish her realism with Tom's marvellous dream. Sid had better +judgment than to utter the thought that was in his mind as he left the +house. It was this: "Pretty thin--as long a dream as that, without any +mistakes in it!" + +What a hero Tom was become, now! He did not go skipping and prancing, +but moved with a dignified swagger as became a pirate who felt that the +public eye was on him. And indeed it was; he tried not to seem to see +the looks or hear the remarks as he passed along, but they were food +and drink to him. Smaller boys than himself flocked at his heels, as +proud to be seen with him, and tolerated by him, as if he had been the +drummer at the head of a procession or the elephant leading a menagerie +into town. Boys of his own size pretended not to know he had been away +at all; but they were consuming with envy, nevertheless. They would +have given anything to have that swarthy suntanned skin of his, and his +glittering notoriety; and Tom would not have parted with either for a +circus. + +At school the children made so much of him and of Joe, and delivered +such eloquent admiration from their eyes, that the two heroes were not +long in becoming insufferably "stuck-up." 
They began to tell their +adventures to hungry listeners--but they only began; it was not a thing +likely to have an end, with imaginations like theirs to furnish +material. And finally, when they got out their pipes and went serenely +puffing around, the very summit of glory was reached. + +Tom decided that he could be independent of Becky Thatcher now. Glory +was sufficient. He would live for glory. Now that he was distinguished, +maybe she would be wanting to "make up." Well, let her--she should see +that he could be as indifferent as some other people. Presently she +arrived. Tom pretended not to see her. He moved away and joined a group +of boys and girls and began to talk. Soon he observed that she was +tripping gayly back and forth with flushed face and dancing eyes, +pretending to be busy chasing schoolmates, and screaming with laughter +when she made a capture; but he noticed that she always made her +captures in his vicinity, and that she seemed to cast a conscious eye +in his direction at such times, too. It gratified all the vicious +vanity that was in him; and so, instead of winning him, it only "set +him up" the more and made him the more diligent to avoid betraying that +he knew she was about. Presently she gave over skylarking, and moved +irresolutely about, sighing once or twice and glancing furtively and +wistfully toward Tom. Then she observed that now Tom was talking more +particularly to Amy Lawrence than to any one else. She felt a sharp +pang and grew disturbed and uneasy at once. She tried to go away, but +her feet were treacherous, and carried her to the group instead. She +said to a girl almost at Tom's elbow--with sham vivacity: + +"Why, Mary Austin! you bad girl, why didn't you come to Sunday-school?" + +"I did come--didn't you see me?" + +"Why, no! Did you? Where did you sit?" + +"I was in Miss Peters' class, where I always go. I saw YOU." + +"Did you? Why, it's funny I didn't see you. I wanted to tell you about +the picnic." + +"Oh, that's jolly. Who's going to give it?" + +"My ma's going to let me have one." + +"Oh, goody; I hope she'll let ME come." + +"Well, she will. The picnic's for me. She'll let anybody come that I +want, and I want you." + +"That's ever so nice. When is it going to be?" + +"By and by. Maybe about vacation." + +"Oh, won't it be fun! You going to have all the girls and boys?" + +"Yes, every one that's friends to me--or wants to be"; and she glanced +ever so furtively at Tom, but he talked right along to Amy Lawrence +about the terrible storm on the island, and how the lightning tore the +great sycamore tree "all to flinders" while he was "standing within +three feet of it." + +"Oh, may I come?" said Grace Miller. + +"Yes." + +"And me?" said Sally Rogers. + +"Yes." + +"And me, too?" said Susy Harper. "And Joe?" + +"Yes." + +And so on, with clapping of joyful hands till all the group had begged +for invitations but Tom and Amy. Then Tom turned coolly away, still +talking, and took Amy with him. Becky's lips trembled and the tears +came to her eyes; she hid these signs with a forced gayety and went on +chattering, but the life had gone out of the picnic, now, and out of +everything else; she got away as soon as she could and hid herself and +had what her sex call "a good cry." Then she sat moody, with wounded +pride, till the bell rang. She roused up, now, with a vindictive cast +in her eye, and gave her plaited tails a shake and said she knew what +SHE'D do. 
+ +At recess Tom continued his flirtation with Amy with jubilant +self-satisfaction. And he kept drifting about to find Becky and lacerate +her with the performance. At last he spied her, but there was a sudden +falling of his mercury. She was sitting cosily on a little bench behind +the schoolhouse looking at a picture-book with Alfred Temple--and so +absorbed were they, and their heads so close together over the book, +that they did not seem to be conscious of anything in the world besides. +Jealousy ran red-hot through Tom's veins. He began to hate himself for +throwing away the chance Becky had offered for a reconciliation. He +called himself a fool, and all the hard names he could think of. He +wanted to cry with vexation. Amy chatted happily along, as they walked, +for her heart was singing, but Tom's tongue had lost its function. He +did not hear what Amy was saying, and whenever she paused expectantly he +could only stammer an awkward assent, which was as often misplaced as +otherwise. He kept drifting to the rear of the schoolhouse, again and +again, to sear his eyeballs with the hateful spectacle there. He could +not help it. And it maddened him to see, as he thought he saw, that +Becky Thatcher never once suspected that he was even in the land of the +living. But she did see, nevertheless; and she knew she was winning her +fight, too, and was glad to see him suffer as she had suffered. + +Amy's happy prattle became intolerable. Tom hinted at things he had to +attend to; things that must be done; and time was fleeting. But in +vain--the girl chirped on. Tom thought, "Oh, hang her, ain't I ever +going to get rid of her?" At last he must be attending to those +things--and she said artlessly that she would be "around" when school +let out. And he hastened away, hating her for it. + +"Any other boy!" Tom thought, grating his teeth. "Any boy in the whole +town but that Saint Louis smarty that thinks he dresses so fine and is +aristocracy! Oh, all right, I licked you the first day you ever saw +this town, mister, and I'll lick you again! You just wait till I catch +you out! I'll just take and--" + +And he went through the motions of thrashing an imaginary boy +--pummelling the air, and kicking and gouging. "Oh, you do, do you? You +holler 'nough, do you? Now, then, let that learn you!" And so the +imaginary flogging was finished to his satisfaction. + +Tom fled home at noon. His conscience could not endure any more of +Amy's grateful happiness, and his jealousy could bear no more of the +other distress. Becky resumed her picture inspections with Alfred, but +as the minutes dragged along and no Tom came to suffer, her triumph +began to cloud and she lost interest; gravity and absent-mindedness +followed, and then melancholy; two or three times she pricked up her +ear at a footstep, but it was a false hope; no Tom came. At last she +grew entirely miserable and wished she hadn't carried it so far. When +poor Alfred, seeing that he was losing her, he did not know how, kept +exclaiming: "Oh, here's a jolly one! look at this!" she lost patience +at last, and said, "Oh, don't bother me! I don't care for them!" and +burst into tears, and got up and walked away. + +Alfred dropped alongside and was going to try to comfort her, but she +said: + +"Go away and leave me alone, can't you! I hate you!" + +So the boy halted, wondering what he could have done--for she had said +she would look at pictures all through the nooning--and she walked on, +crying. 
Then Alfred went musing into the deserted schoolhouse. He was +humiliated and angry. He easily guessed his way to the truth--the girl +had simply made a convenience of him to vent her spite upon Tom Sawyer. +He was far from hating Tom the less when this thought occurred to him. +He wished there was some way to get that boy into trouble without much +risk to himself. Tom's spelling-book fell under his eye. Here was his +opportunity. He gratefully opened to the lesson for the afternoon and +poured ink upon the page. + +Becky, glancing in at a window behind him at the moment, saw the act, +and moved on, without discovering herself. She started homeward, now, +intending to find Tom and tell him; Tom would be thankful and their +troubles would be healed. Before she was half way home, however, she +had changed her mind. The thought of Tom's treatment of her when she +was talking about her picnic came scorching back and filled her with +shame. She resolved to let him get whipped on the damaged +spelling-book's account, and to hate him forever, into the bargain. + + + +CHAPTER XIX + +TOM arrived at home in a dreary mood, and the first thing his aunt +said to him showed him that he had brought his sorrows to an +unpromising market: + +"Tom, I've a notion to skin you alive!" + +"Auntie, what have I done?" + +"Well, you've done enough. Here I go over to Sereny Harper, like an +old softy, expecting I'm going to make her believe all that rubbage +about that dream, when lo and behold you she'd found out from Joe that +you was over here and heard all the talk we had that night. Tom, I +don't know what is to become of a boy that will act like that. It makes +me feel so bad to think you could let me go to Sereny Harper and make +such a fool of myself and never say a word." + +This was a new aspect of the thing. His smartness of the morning had +seemed to Tom a good joke before, and very ingenious. It merely looked +mean and shabby now. He hung his head and could not think of anything +to say for a moment. Then he said: + +"Auntie, I wish I hadn't done it--but I didn't think." + +"Oh, child, you never think. You never think of anything but your own +selfishness. You could think to come all the way over here from +Jackson's Island in the night to laugh at our troubles, and you could +think to fool me with a lie about a dream; but you couldn't ever think +to pity us and save us from sorrow." + +"Auntie, I know now it was mean, but I didn't mean to be mean. I +didn't, honest. And besides, I didn't come over here to laugh at you +that night." + +"What did you come for, then?" + +"It was to tell you not to be uneasy about us, because we hadn't got +drownded." + +"Tom, Tom, I would be the thankfullest soul in this world if I could +believe you ever had as good a thought as that, but you know you never +did--and I know it, Tom." + +"Indeed and 'deed I did, auntie--I wish I may never stir if I didn't." + +"Oh, Tom, don't lie--don't do it. It only makes things a hundred times +worse." + +"It ain't a lie, auntie; it's the truth. I wanted to keep you from +grieving--that was all that made me come." + +"I'd give the whole world to believe that--it would cover up a power +of sins, Tom. I'd 'most be glad you'd run off and acted so bad. But it +ain't reasonable; because, why didn't you tell me, child?" + +"Why, you see, when you got to talking about the funeral, I just got +all full of the idea of our coming and hiding in the church, and I +couldn't somehow bear to spoil it. 
So I just put the bark back in my +pocket and kept mum." + +"What bark?" + +"The bark I had wrote on to tell you we'd gone pirating. I wish, now, +you'd waked up when I kissed you--I do, honest." + +The hard lines in his aunt's face relaxed and a sudden tenderness +dawned in her eyes. + +"DID you kiss me, Tom?" + +"Why, yes, I did." + +"Are you sure you did, Tom?" + +"Why, yes, I did, auntie--certain sure." + +"What did you kiss me for, Tom?" + +"Because I loved you so, and you laid there moaning and I was so sorry." + +The words sounded like truth. The old lady could not hide a tremor in +her voice when she said: + +"Kiss me again, Tom!--and be off with you to school, now, and don't +bother me any more." + +The moment he was gone, she ran to a closet and got out the ruin of a +jacket which Tom had gone pirating in. Then she stopped, with it in her +hand, and said to herself: + +"No, I don't dare. Poor boy, I reckon he's lied about it--but it's a +blessed, blessed lie, there's such a comfort come from it. I hope the +Lord--I KNOW the Lord will forgive him, because it was such +goodheartedness in him to tell it. But I don't want to find out it's a +lie. I won't look." + +She put the jacket away, and stood by musing a minute. Twice she put +out her hand to take the garment again, and twice she refrained. Once +more she ventured, and this time she fortified herself with the +thought: "It's a good lie--it's a good lie--I won't let it grieve me." +So she sought the jacket pocket. A moment later she was reading Tom's +piece of bark through flowing tears and saying: "I could forgive the +boy, now, if he'd committed a million sins!" + + + +CHAPTER XX + +THERE was something about Aunt Polly's manner, when she kissed Tom, +that swept away his low spirits and made him lighthearted and happy +again. He started to school and had the luck of coming upon Becky +Thatcher at the head of Meadow Lane. His mood always determined his +manner. Without a moment's hesitation he ran to her and said: + +"I acted mighty mean to-day, Becky, and I'm so sorry. I won't ever, +ever do that way again, as long as ever I live--please make up, won't +you?" + +The girl stopped and looked him scornfully in the face: + +"I'll thank you to keep yourself TO yourself, Mr. Thomas Sawyer. I'll +never speak to you again." + +She tossed her head and passed on. Tom was so stunned that he had not +even presence of mind enough to say "Who cares, Miss Smarty?" until the +right time to say it had gone by. So he said nothing. But he was in a +fine rage, nevertheless. He moped into the schoolyard wishing she were +a boy, and imagining how he would trounce her if she were. He presently +encountered her and delivered a stinging remark as he passed. She +hurled one in return, and the angry breach was complete. It seemed to +Becky, in her hot resentment, that she could hardly wait for school to +"take in," she was so impatient to see Tom flogged for the injured +spelling-book. If she had had any lingering notion of exposing Alfred +Temple, Tom's offensive fling had driven it entirely away. + +Poor girl, she did not know how fast she was nearing trouble herself. +The master, Mr. Dobbins, had reached middle age with an unsatisfied +ambition. The darling of his desires was, to be a doctor, but poverty +had decreed that he should be nothing higher than a village +schoolmaster. Every day he took a mysterious book out of his desk and +absorbed himself in it at times when no classes were reciting. He kept +that book under lock and key. 
There was not an urchin in school but was +perishing to have a glimpse of it, but the chance never came. Every boy +and girl had a theory about the nature of that book; but no two +theories were alike, and there was no way of getting at the facts in +the case. Now, as Becky was passing by the desk, which stood near the +door, she noticed that the key was in the lock! It was a precious +moment. She glanced around; found herself alone, and the next instant +she had the book in her hands. The title-page--Professor Somebody's +ANATOMY--carried no information to her mind; so she began to turn the +leaves. She came at once upon a handsomely engraved and colored +frontispiece--a human figure, stark naked. At that moment a shadow fell +on the page and Tom Sawyer stepped in at the door and caught a glimpse +of the picture. Becky snatched at the book to close it, and had the +hard luck to tear the pictured page half down the middle. She thrust +the volume into the desk, turned the key, and burst out crying with +shame and vexation. + +"Tom Sawyer, you are just as mean as you can be, to sneak up on a +person and look at what they're looking at." + +"How could I know you was looking at anything?" + +"You ought to be ashamed of yourself, Tom Sawyer; you know you're +going to tell on me, and oh, what shall I do, what shall I do! I'll be +whipped, and I never was whipped in school." + +Then she stamped her little foot and said: + +"BE so mean if you want to! I know something that's going to happen. +You just wait and you'll see! Hateful, hateful, hateful!"--and she +flung out of the house with a new explosion of crying. + +Tom stood still, rather flustered by this onslaught. Presently he said +to himself: + +"What a curious kind of a fool a girl is! Never been licked in school! +Shucks! What's a licking! That's just like a girl--they're so +thin-skinned and chicken-hearted. Well, of course I ain't going to tell +old Dobbins on this little fool, because there's other ways of getting +even on her, that ain't so mean; but what of it? Old Dobbins will ask +who it was tore his book. Nobody'll answer. Then he'll do just the way +he always does--ask first one and then t'other, and when he comes to the +right girl he'll know it, without any telling. Girls' faces always tell +on them. They ain't got any backbone. She'll get licked. Well, it's a +kind of a tight place for Becky Thatcher, because there ain't any way +out of it." Tom conned the thing a moment longer, and then added: "All +right, though; she'd like to see me in just such a fix--let her sweat it +out!" + +Tom joined the mob of skylarking scholars outside. In a few moments +the master arrived and school "took in." Tom did not feel a strong +interest in his studies. Every time he stole a glance at the girls' +side of the room Becky's face troubled him. Considering all things, he +did not want to pity her, and yet it was all he could do to help it. He +could get up no exultation that was really worthy the name. Presently +the spelling-book discovery was made, and Tom's mind was entirely full +of his own matters for a while after that. Becky roused up from her +lethargy of distress and showed good interest in the proceedings. She +did not expect that Tom could get out of his trouble by denying that he +spilt the ink on the book himself; and she was right. The denial only +seemed to make the thing worse for Tom. Becky supposed she would be +glad of that, and she tried to believe she was glad of it, but she +found she was not certain. 
When the worst came to the worst, she had an +impulse to get up and tell on Alfred Temple, but she made an effort and +forced herself to keep still--because, said she to herself, "he'll tell +about me tearing the picture sure. I wouldn't say a word, not to save +his life!" + +Tom took his whipping and went back to his seat not at all +broken-hearted, for he thought it was possible that he had unknowingly +upset the ink on the spelling-book himself, in some skylarking bout--he +had denied it for form's sake and because it was custom, and had stuck +to the denial from principle. + +A whole hour drifted by, the master sat nodding in his throne, the air +was drowsy with the hum of study. By and by, Mr. Dobbins straightened +himself up, yawned, then unlocked his desk, and reached for his book, +but seemed undecided whether to take it out or leave it. Most of the +pupils glanced up languidly, but there were two among them that watched +his movements with intent eyes. Mr. Dobbins fingered his book absently +for a while, then took it out and settled himself in his chair to read! +Tom shot a glance at Becky. He had seen a hunted and helpless rabbit +look as she did, with a gun levelled at its head. Instantly he forgot +his quarrel with her. Quick--something must be done! done in a flash, +too! But the very imminence of the emergency paralyzed his invention. +Good!--he had an inspiration! He would run and snatch the book, spring +through the door and fly. But his resolution shook for one little +instant, and the chance was lost--the master opened the volume. If Tom +only had the wasted opportunity back again! Too late. There was no help +for Becky now, he said. The next moment the master faced the school. +Every eye sank under his gaze. There was that in it which smote even +the innocent with fear. There was silence while one might count ten +--the master was gathering his wrath. Then he spoke: "Who tore this book?" + +There was not a sound. One could have heard a pin drop. The stillness +continued; the master searched face after face for signs of guilt. + +"Benjamin Rogers, did you tear this book?" + +A denial. Another pause. + +"Joseph Harper, did you?" + +Another denial. Tom's uneasiness grew more and more intense under the +slow torture of these proceedings. The master scanned the ranks of +boys--considered a while, then turned to the girls: + +"Amy Lawrence?" + +A shake of the head. + +"Gracie Miller?" + +The same sign. + +"Susan Harper, did you do this?" + +Another negative. The next girl was Becky Thatcher. Tom was trembling +from head to foot with excitement and a sense of the hopelessness of +the situation. + +"Rebecca Thatcher" [Tom glanced at her face--it was white with terror] +--"did you tear--no, look me in the face" [her hands rose in appeal] +--"did you tear this book?" + +A thought shot like lightning through Tom's brain. He sprang to his +feet and shouted--"I done it!" + +The school stared in perplexity at this incredible folly. Tom stood a +moment, to gather his dismembered faculties; and when he stepped +forward to go to his punishment the surprise, the gratitude, the +adoration that shone upon him out of poor Becky's eyes seemed pay +enough for a hundred floggings. Inspired by the splendor of his own +act, he took without an outcry the most merciless flaying that even Mr. 
+Dobbins had ever administered; and also received with indifference the +added cruelty of a command to remain two hours after school should be +dismissed--for he knew who would wait for him outside till his +captivity was done, and not count the tedious time as loss, either. + +Tom went to bed that night planning vengeance against Alfred Temple; +for with shame and repentance Becky had told him all, not forgetting +her own treachery; but even the longing for vengeance had to give way, +soon, to pleasanter musings, and he fell asleep at last with Becky's +latest words lingering dreamily in his ear-- + +"Tom, how COULD you be so noble!" + + + +CHAPTER XXI + +VACATION was approaching. The schoolmaster, always severe, grew +severer and more exacting than ever, for he wanted the school to make a +good showing on "Examination" day. His rod and his ferule were seldom +idle now--at least among the smaller pupils. Only the biggest boys, and +young ladies of eighteen and twenty, escaped lashing. Mr. Dobbins' +lashings were very vigorous ones, too; for although he carried, under +his wig, a perfectly bald and shiny head, he had only reached middle +age, and there was no sign of feebleness in his muscle. As the great +day approached, all the tyranny that was in him came to the surface; he +seemed to take a vindictive pleasure in punishing the least +shortcomings. The consequence was, that the smaller boys spent their +days in terror and suffering and their nights in plotting revenge. They +threw away no opportunity to do the master a mischief. But he kept +ahead all the time. The retribution that followed every vengeful +success was so sweeping and majestic that the boys always retired from +the field badly worsted. At last they conspired together and hit upon a +plan that promised a dazzling victory. They swore in the sign-painter's +boy, told him the scheme, and asked his help. He had his own reasons +for being delighted, for the master boarded in his father's family and +had given the boy ample cause to hate him. The master's wife would go +on a visit to the country in a few days, and there would be nothing to +interfere with the plan; the master always prepared himself for great +occasions by getting pretty well fuddled, and the sign-painter's boy +said that when the dominie had reached the proper condition on +Examination Evening he would "manage the thing" while he napped in his +chair; then he would have him awakened at the right time and hurried +away to school. + +In the fulness of time the interesting occasion arrived. At eight in +the evening the schoolhouse was brilliantly lighted, and adorned with +wreaths and festoons of foliage and flowers. The master sat throned in +his great chair upon a raised platform, with his blackboard behind him. +He was looking tolerably mellow. Three rows of benches on each side and +six rows in front of him were occupied by the dignitaries of the town +and by the parents of the pupils. To his left, back of the rows of +citizens, was a spacious temporary platform upon which were seated the +scholars who were to take part in the exercises of the evening; rows of +small boys, washed and dressed to an intolerable state of discomfort; +rows of gawky big boys; snowbanks of girls and young ladies clad in +lawn and muslin and conspicuously conscious of their bare arms, their +grandmothers' ancient trinkets, their bits of pink and blue ribbon and +the flowers in their hair. All the rest of the house was filled with +non-participating scholars. + +The exercises began. 
A very little boy stood up and sheepishly +recited, "You'd scarce expect one of my age to speak in public on the +stage," etc.--accompanying himself with the painfully exact and +spasmodic gestures which a machine might have used--supposing the +machine to be a trifle out of order. But he got through safely, though +cruelly scared, and got a fine round of applause when he made his +manufactured bow and retired. + +A little shamefaced girl lisped, "Mary had a little lamb," etc., +performed a compassion-inspiring curtsy, got her meed of applause, and +sat down flushed and happy. + +Tom Sawyer stepped forward with conceited confidence and soared into +the unquenchable and indestructible "Give me liberty or give me death" +speech, with fine fury and frantic gesticulation, and broke down in the +middle of it. A ghastly stage-fright seized him, his legs quaked under +him and he was like to choke. True, he had the manifest sympathy of the +house but he had the house's silence, too, which was even worse than +its sympathy. The master frowned, and this completed the disaster. Tom +struggled awhile and then retired, utterly defeated. There was a weak +attempt at applause, but it died early. + +"The Boy Stood on the Burning Deck" followed; also "The Assyrian Came +Down," and other declamatory gems. Then there were reading exercises, +and a spelling fight. The meagre Latin class recited with honor. The +prime feature of the evening was in order, now--original "compositions" +by the young ladies. Each in her turn stepped forward to the edge of +the platform, cleared her throat, held up her manuscript (tied with +dainty ribbon), and proceeded to read, with labored attention to +"expression" and punctuation. The themes were the same that had been +illuminated upon similar occasions by their mothers before them, their +grandmothers, and doubtless all their ancestors in the female line +clear back to the Crusades. "Friendship" was one; "Memories of Other +Days"; "Religion in History"; "Dream Land"; "The Advantages of +Culture"; "Forms of Political Government Compared and Contrasted"; +"Melancholy"; "Filial Love"; "Heart Longings," etc., etc. + +A prevalent feature in these compositions was a nursed and petted +melancholy; another was a wasteful and opulent gush of "fine language"; +another was a tendency to lug in by the ears particularly prized words +and phrases until they were worn entirely out; and a peculiarity that +conspicuously marked and marred them was the inveterate and intolerable +sermon that wagged its crippled tail at the end of each and every one +of them. No matter what the subject might be, a brain-racking effort +was made to squirm it into some aspect or other that the moral and +religious mind could contemplate with edification. The glaring +insincerity of these sermons was not sufficient to compass the +banishment of the fashion from the schools, and it is not sufficient +to-day; it never will be sufficient while the world stands, perhaps. +There is no school in all our land where the young ladies do not feel +obliged to close their compositions with a sermon; and you will find +that the sermon of the most frivolous and the least religious girl in +the school is always the longest and the most relentlessly pious. But +enough of this. Homely truth is unpalatable. + +Let us return to the "Examination." The first composition that was +read was one entitled "Is this, then, Life?" 
Perhaps the reader can +endure an extract from it: + + "In the common walks of life, with what delightful + emotions does the youthful mind look forward to some + anticipated scene of festivity! Imagination is busy + sketching rose-tinted pictures of joy. In fancy, the + voluptuous votary of fashion sees herself amid the + festive throng, 'the observed of all observers.' Her + graceful form, arrayed in snowy robes, is whirling + through the mazes of the joyous dance; her eye is + brightest, her step is lightest in the gay assembly. + + "In such delicious fancies time quickly glides by, + and the welcome hour arrives for her entrance into + the Elysian world, of which she has had such bright + dreams. How fairy-like does everything appear to + her enchanted vision! Each new scene is more charming + than the last. But after a while she finds that + beneath this goodly exterior, all is vanity, the + flattery which once charmed her soul, now grates + harshly upon her ear; the ball-room has lost its + charms; and with wasted health and imbittered heart, + she turns away with the conviction that earthly + pleasures cannot satisfy the longings of the soul!" + +And so forth and so on. There was a buzz of gratification from time to +time during the reading, accompanied by whispered ejaculations of "How +sweet!" "How eloquent!" "So true!" etc., and after the thing had closed +with a peculiarly afflicting sermon the applause was enthusiastic. + +Then arose a slim, melancholy girl, whose face had the "interesting" +paleness that comes of pills and indigestion, and read a "poem." Two +stanzas of it will do: + + "A MISSOURI MAIDEN'S FAREWELL TO ALABAMA + + "Alabama, good-bye! I love thee well! + But yet for a while do I leave thee now! + Sad, yes, sad thoughts of thee my heart doth swell, + And burning recollections throng my brow! + For I have wandered through thy flowery woods; + Have roamed and read near Tallapoosa's stream; + Have listened to Tallassee's warring floods, + And wooed on Coosa's side Aurora's beam. + + "Yet shame I not to bear an o'er-full heart, + Nor blush to turn behind my tearful eyes; + 'Tis from no stranger land I now must part, + 'Tis to no strangers left I yield these sighs. + Welcome and home were mine within this State, + Whose vales I leave--whose spires fade fast from me + And cold must be mine eyes, and heart, and tete, + When, dear Alabama! they turn cold on thee!" + +There were very few there who knew what "tete" meant, but the poem was +very satisfactory, nevertheless. + +Next appeared a dark-complexioned, black-eyed, black-haired young +lady, who paused an impressive moment, assumed a tragic expression, and +began to read in a measured, solemn tone: + + "A VISION + + "Dark and tempestuous was night. Around the + throne on high not a single star quivered; but + the deep intonations of the heavy thunder + constantly vibrated upon the ear; whilst the + terrific lightning revelled in angry mood + through the cloudy chambers of heaven, seeming + to scorn the power exerted over its terror by + the illustrious Franklin! Even the boisterous + winds unanimously came forth from their mystic + homes, and blustered about as if to enhance by + their aid the wildness of the scene. + + "At such a time, so dark, so dreary, for human + sympathy my very spirit sighed; but instead thereof, + + "'My dearest friend, my counsellor, my comforter + and guide--My joy in grief, my second bliss + in joy,' came to my side. 
She moved like one of + those bright beings pictured in the sunny walks + of fancy's Eden by the romantic and young, a + queen of beauty unadorned save by her own + transcendent loveliness. So soft was her step, it + failed to make even a sound, and but for the + magical thrill imparted by her genial touch, as + other unobtrusive beauties, she would have glided + away un-perceived--unsought. A strange sadness + rested upon her features, like icy tears upon + the robe of December, as she pointed to the + contending elements without, and bade me contemplate + the two beings presented." + +This nightmare occupied some ten pages of manuscript and wound up with +a sermon so destructive of all hope to non-Presbyterians that it took +the first prize. This composition was considered to be the very finest +effort of the evening. The mayor of the village, in delivering the +prize to the author of it, made a warm speech in which he said that it +was by far the most "eloquent" thing he had ever listened to, and that +Daniel Webster himself might well be proud of it. + +It may be remarked, in passing, that the number of compositions in +which the word "beauteous" was over-fondled, and human experience +referred to as "life's page," was up to the usual average. + +Now the master, mellow almost to the verge of geniality, put his chair +aside, turned his back to the audience, and began to draw a map of +America on the blackboard, to exercise the geography class upon. But he +made a sad business of it with his unsteady hand, and a smothered +titter rippled over the house. He knew what the matter was, and set +himself to right it. He sponged out lines and remade them; but he only +distorted them more than ever, and the tittering was more pronounced. +He threw his entire attention upon his work, now, as if determined not +to be put down by the mirth. He felt that all eyes were fastened upon +him; he imagined he was succeeding, and yet the tittering continued; it +even manifestly increased. And well it might. There was a garret above, +pierced with a scuttle over his head; and down through this scuttle +came a cat, suspended around the haunches by a string; she had a rag +tied about her head and jaws to keep her from mewing; as she slowly +descended she curved upward and clawed at the string, she swung +downward and clawed at the intangible air. The tittering rose higher +and higher--the cat was within six inches of the absorbed teacher's +head--down, down, a little lower, and she grabbed his wig with her +desperate claws, clung to it, and was snatched up into the garret in an +instant with her trophy still in her possession! And how the light did +blaze abroad from the master's bald pate--for the sign-painter's boy +had GILDED it! + +That broke up the meeting. The boys were avenged. Vacation had come. + + NOTE:--The pretended "compositions" quoted in + this chapter are taken without alteration from a + volume entitled "Prose and Poetry, by a Western + Lady"--but they are exactly and precisely after + the schoolgirl pattern, and hence are much + happier than any mere imitations could be. + + + +CHAPTER XXII + +TOM joined the new order of Cadets of Temperance, being attracted by +the showy character of their "regalia." He promised to abstain from +smoking, chewing, and profanity as long as he remained a member. Now he +found out a new thing--namely, that to promise not to do a thing is the +surest way in the world to make a body want to go and do that very +thing. 
Tom soon found himself tormented with a desire to drink and +swear; the desire grew to be so intense that nothing but the hope of a +chance to display himself in his red sash kept him from withdrawing +from the order. Fourth of July was coming; but he soon gave that up +--gave it up before he had worn his shackles over forty-eight hours--and +fixed his hopes upon old Judge Frazer, justice of the peace, who was +apparently on his deathbed and would have a big public funeral, since +he was so high an official. During three days Tom was deeply concerned +about the Judge's condition and hungry for news of it. Sometimes his +hopes ran high--so high that he would venture to get out his regalia +and practise before the looking-glass. But the Judge had a most +discouraging way of fluctuating. At last he was pronounced upon the +mend--and then convalescent. Tom was disgusted; and felt a sense of +injury, too. He handed in his resignation at once--and that night the +Judge suffered a relapse and died. Tom resolved that he would never +trust a man like that again. + +The funeral was a fine thing. The Cadets paraded in a style calculated +to kill the late member with envy. Tom was a free boy again, however +--there was something in that. He could drink and swear, now--but found +to his surprise that he did not want to. The simple fact that he could, +took the desire away, and the charm of it. + +Tom presently wondered to find that his coveted vacation was beginning +to hang a little heavily on his hands. + +He attempted a diary--but nothing happened during three days, and so +he abandoned it. + +The first of all the negro minstrel shows came to town, and made a +sensation. Tom and Joe Harper got up a band of performers and were +happy for two days. + +Even the Glorious Fourth was in some sense a failure, for it rained +hard, there was no procession in consequence, and the greatest man in +the world (as Tom supposed), Mr. Benton, an actual United States +Senator, proved an overwhelming disappointment--for he was not +twenty-five feet high, nor even anywhere in the neighborhood of it. + +A circus came. The boys played circus for three days afterward in +tents made of rag carpeting--admission, three pins for boys, two for +girls--and then circusing was abandoned. + +A phrenologist and a mesmerizer came--and went again and left the +village duller and drearier than ever. + +There were some boys-and-girls' parties, but they were so few and so +delightful that they only made the aching voids between ache the harder. + +Becky Thatcher was gone to her Constantinople home to stay with her +parents during vacation--so there was no bright side to life anywhere. + +The dreadful secret of the murder was a chronic misery. It was a very +cancer for permanency and pain. + +Then came the measles. + +During two long weeks Tom lay a prisoner, dead to the world and its +happenings. He was very ill, he was interested in nothing. When he got +upon his feet at last and moved feebly down-town, a melancholy change +had come over everything and every creature. There had been a +"revival," and everybody had "got religion," not only the adults, but +even the boys and girls. Tom went about, hoping against hope for the +sight of one blessed sinful face, but disappointment crossed him +everywhere. He found Joe Harper studying a Testament, and turned sadly +away from the depressing spectacle. He sought Ben Rogers, and found him +visiting the poor with a basket of tracts. 
He hunted up Jim Hollis, who +called his attention to the precious blessing of his late measles as a +warning. Every boy he encountered added another ton to his depression; +and when, in desperation, he flew for refuge at last to the bosom of +Huckleberry Finn and was received with a Scriptural quotation, his +heart broke and he crept home and to bed realizing that he alone of all +the town was lost, forever and forever. + +And that night there came on a terrific storm, with driving rain, +awful claps of thunder and blinding sheets of lightning. He covered his +head with the bedclothes and waited in a horror of suspense for his +doom; for he had not the shadow of a doubt that all this hubbub was +about him. He believed he had taxed the forbearance of the powers above +to the extremity of endurance and that this was the result. It might +have seemed to him a waste of pomp and ammunition to kill a bug with a +battery of artillery, but there seemed nothing incongruous about the +getting up such an expensive thunderstorm as this to knock the turf +from under an insect like himself. + +By and by the tempest spent itself and died without accomplishing its +object. The boy's first impulse was to be grateful, and reform. His +second was to wait--for there might not be any more storms. + +The next day the doctors were back; Tom had relapsed. The three weeks +he spent on his back this time seemed an entire age. When he got abroad +at last he was hardly grateful that he had been spared, remembering how +lonely was his estate, how companionless and forlorn he was. He drifted +listlessly down the street and found Jim Hollis acting as judge in a +juvenile court that was trying a cat for murder, in the presence of her +victim, a bird. He found Joe Harper and Huck Finn up an alley eating a +stolen melon. Poor lads! they--like Tom--had suffered a relapse. + + + +CHAPTER XXIII + +AT last the sleepy atmosphere was stirred--and vigorously: the murder +trial came on in the court. It became the absorbing topic of village +talk immediately. Tom could not get away from it. Every reference to +the murder sent a shudder to his heart, for his troubled conscience and +fears almost persuaded him that these remarks were put forth in his +hearing as "feelers"; he did not see how he could be suspected of +knowing anything about the murder, but still he could not be +comfortable in the midst of this gossip. It kept him in a cold shiver +all the time. He took Huck to a lonely place to have a talk with him. +It would be some relief to unseal his tongue for a little while; to +divide his burden of distress with another sufferer. Moreover, he +wanted to assure himself that Huck had remained discreet. + +"Huck, have you ever told anybody about--that?" + +"'Bout what?" + +"You know what." + +"Oh--'course I haven't." + +"Never a word?" + +"Never a solitary word, so help me. What makes you ask?" + +"Well, I was afeard." + +"Why, Tom Sawyer, we wouldn't be alive two days if that got found out. +YOU know that." + +Tom felt more comfortable. After a pause: + +"Huck, they couldn't anybody get you to tell, could they?" + +"Get me to tell? Why, if I wanted that half-breed devil to drownd me +they could get me to tell. They ain't no different way." + +"Well, that's all right, then. I reckon we're safe as long as we keep +mum. But let's swear again, anyway. It's more surer." + +"I'm agreed." + +So they swore again with dread solemnities. + +"What is the talk around, Huck? I've heard a power of it." + +"Talk? 
Well, it's just Muff Potter, Muff Potter, Muff Potter all the +time. It keeps me in a sweat, constant, so's I want to hide som'ers." + +"That's just the same way they go on round me. I reckon he's a goner. +Don't you feel sorry for him, sometimes?" + +"Most always--most always. He ain't no account; but then he hain't +ever done anything to hurt anybody. Just fishes a little, to get money +to get drunk on--and loafs around considerable; but lord, we all do +that--leastways most of us--preachers and such like. But he's kind of +good--he give me half a fish, once, when there warn't enough for two; +and lots of times he's kind of stood by me when I was out of luck." + +"Well, he's mended kites for me, Huck, and knitted hooks on to my +line. I wish we could get him out of there." + +"My! we couldn't get him out, Tom. And besides, 'twouldn't do any +good; they'd ketch him again." + +"Yes--so they would. But I hate to hear 'em abuse him so like the +dickens when he never done--that." + +"I do too, Tom. Lord, I hear 'em say he's the bloodiest looking +villain in this country, and they wonder he wasn't ever hung before." + +"Yes, they talk like that, all the time. I've heard 'em say that if he +was to get free they'd lynch him." + +"And they'd do it, too." + +The boys had a long talk, but it brought them little comfort. As the +twilight drew on, they found themselves hanging about the neighborhood +of the little isolated jail, perhaps with an undefined hope that +something would happen that might clear away their difficulties. But +nothing happened; there seemed to be no angels or fairies interested in +this luckless captive. + +The boys did as they had often done before--went to the cell grating +and gave Potter some tobacco and matches. He was on the ground floor +and there were no guards. + +His gratitude for their gifts had always smote their consciences +before--it cut deeper than ever, this time. They felt cowardly and +treacherous to the last degree when Potter said: + +"You've been mighty good to me, boys--better'n anybody else in this +town. And I don't forget it, I don't. Often I says to myself, says I, +'I used to mend all the boys' kites and things, and show 'em where the +good fishin' places was, and befriend 'em what I could, and now they've +all forgot old Muff when he's in trouble; but Tom don't, and Huck +don't--THEY don't forget him, says I, 'and I don't forget them.' Well, +boys, I done an awful thing--drunk and crazy at the time--that's the +only way I account for it--and now I got to swing for it, and it's +right. Right, and BEST, too, I reckon--hope so, anyway. Well, we won't +talk about that. I don't want to make YOU feel bad; you've befriended +me. But what I want to say, is, don't YOU ever get drunk--then you won't +ever get here. Stand a litter furder west--so--that's it; it's a prime +comfort to see faces that's friendly when a body's in such a muck of +trouble, and there don't none come here but yourn. Good friendly +faces--good friendly faces. Git up on one another's backs and let me +touch 'em. That's it. Shake hands--yourn'll come through the bars, but +mine's too big. Little hands, and weak--but they've helped Muff Potter +a power, and they'd help him more if they could." + +Tom went home miserable, and his dreams that night were full of +horrors. The next day and the day after, he hung about the court-room, +drawn by an almost irresistible impulse to go in, but forcing himself +to stay out. Huck was having the same experience. They studiously +avoided each other. 
Each wandered away, from time to time, but the same +dismal fascination always brought them back presently. Tom kept his +ears open when idlers sauntered out of the court-room, but invariably +heard distressing news--the toils were closing more and more +relentlessly around poor Potter. At the end of the second day the +village talk was to the effect that Injun Joe's evidence stood firm and +unshaken, and that there was not the slightest question as to what the +jury's verdict would be. + +Tom was out late, that night, and came to bed through the window. He +was in a tremendous state of excitement. It was hours before he got to +sleep. All the village flocked to the court-house the next morning, for +this was to be the great day. Both sexes were about equally represented +in the packed audience. After a long wait the jury filed in and took +their places; shortly afterward, Potter, pale and haggard, timid and +hopeless, was brought in, with chains upon him, and seated where all +the curious eyes could stare at him; no less conspicuous was Injun Joe, +stolid as ever. There was another pause, and then the judge arrived and +the sheriff proclaimed the opening of the court. The usual whisperings +among the lawyers and gathering together of papers followed. These +details and accompanying delays worked up an atmosphere of preparation +that was as impressive as it was fascinating. + +Now a witness was called who testified that he found Muff Potter +washing in the brook, at an early hour of the morning that the murder +was discovered, and that he immediately sneaked away. After some +further questioning, counsel for the prosecution said: + +"Take the witness." + +The prisoner raised his eyes for a moment, but dropped them again when +his own counsel said: + +"I have no questions to ask him." + +The next witness proved the finding of the knife near the corpse. +Counsel for the prosecution said: + +"Take the witness." + +"I have no questions to ask him," Potter's lawyer replied. + +A third witness swore he had often seen the knife in Potter's +possession. + +"Take the witness." + +Counsel for Potter declined to question him. The faces of the audience +began to betray annoyance. Did this attorney mean to throw away his +client's life without an effort? + +Several witnesses deposed concerning Potter's guilty behavior when +brought to the scene of the murder. They were allowed to leave the +stand without being cross-questioned. + +Every detail of the damaging circumstances that occurred in the +graveyard upon that morning which all present remembered so well was +brought out by credible witnesses, but none of them were cross-examined +by Potter's lawyer. The perplexity and dissatisfaction of the house +expressed itself in murmurs and provoked a reproof from the bench. +Counsel for the prosecution now said: + +"By the oaths of citizens whose simple word is above suspicion, we +have fastened this awful crime, beyond all possibility of question, +upon the unhappy prisoner at the bar. We rest our case here." + +A groan escaped from poor Potter, and he put his face in his hands and +rocked his body softly to and fro, while a painful silence reigned in +the court-room. Many men were moved, and many women's compassion +testified itself in tears. Counsel for the defence rose and said: + +"Your honor, in our remarks at the opening of this trial, we +foreshadowed our purpose to prove that our client did this fearful deed +while under the influence of a blind and irresponsible delirium +produced by drink. 
We have changed our mind. We shall not offer that +plea." [Then to the clerk:] "Call Thomas Sawyer!" + +A puzzled amazement awoke in every face in the house, not even +excepting Potter's. Every eye fastened itself with wondering interest +upon Tom as he rose and took his place upon the stand. The boy looked +wild enough, for he was badly scared. The oath was administered. + +"Thomas Sawyer, where were you on the seventeenth of June, about the +hour of midnight?" + +Tom glanced at Injun Joe's iron face and his tongue failed him. The +audience listened breathless, but the words refused to come. After a +few moments, however, the boy got a little of his strength back, and +managed to put enough of it into his voice to make part of the house +hear: + +"In the graveyard!" + +"A little bit louder, please. Don't be afraid. You were--" + +"In the graveyard." + +A contemptuous smile flitted across Injun Joe's face. + +"Were you anywhere near Horse Williams' grave?" + +"Yes, sir." + +"Speak up--just a trifle louder. How near were you?" + +"Near as I am to you." + +"Were you hidden, or not?" + +"I was hid." + +"Where?" + +"Behind the elms that's on the edge of the grave." + +Injun Joe gave a barely perceptible start. + +"Any one with you?" + +"Yes, sir. I went there with--" + +"Wait--wait a moment. Never mind mentioning your companion's name. We +will produce him at the proper time. Did you carry anything there with +you." + +Tom hesitated and looked confused. + +"Speak out, my boy--don't be diffident. The truth is always +respectable. What did you take there?" + +"Only a--a--dead cat." + +There was a ripple of mirth, which the court checked. + +"We will produce the skeleton of that cat. Now, my boy, tell us +everything that occurred--tell it in your own way--don't skip anything, +and don't be afraid." + +Tom began--hesitatingly at first, but as he warmed to his subject his +words flowed more and more easily; in a little while every sound ceased +but his own voice; every eye fixed itself upon him; with parted lips +and bated breath the audience hung upon his words, taking no note of +time, rapt in the ghastly fascinations of the tale. The strain upon +pent emotion reached its climax when the boy said: + +"--and as the doctor fetched the board around and Muff Potter fell, +Injun Joe jumped with the knife and--" + +Crash! Quick as lightning the half-breed sprang for a window, tore his +way through all opposers, and was gone! + + + +CHAPTER XXIV + +TOM was a glittering hero once more--the pet of the old, the envy of +the young. His name even went into immortal print, for the village +paper magnified him. There were some that believed he would be +President, yet, if he escaped hanging. + +As usual, the fickle, unreasoning world took Muff Potter to its bosom +and fondled him as lavishly as it had abused him before. But that sort +of conduct is to the world's credit; therefore it is not well to find +fault with it. + +Tom's days were days of splendor and exultation to him, but his nights +were seasons of horror. Injun Joe infested all his dreams, and always +with doom in his eye. Hardly any temptation could persuade the boy to +stir abroad after nightfall. Poor Huck was in the same state of +wretchedness and terror, for Tom had told the whole story to the lawyer +the night before the great day of the trial, and Huck was sore afraid +that his share in the business might leak out, yet, notwithstanding +Injun Joe's flight had saved him the suffering of testifying in court. 
+The poor fellow had got the attorney to promise secrecy, but what of +that? Since Tom's harassed conscience had managed to drive him to the +lawyer's house by night and wring a dread tale from lips that had been +sealed with the dismalest and most formidable of oaths, Huck's +confidence in the human race was well-nigh obliterated. + +Daily Muff Potter's gratitude made Tom glad he had spoken; but nightly +he wished he had sealed up his tongue. + +Half the time Tom was afraid Injun Joe would never be captured; the +other half he was afraid he would be. He felt sure he never could draw +a safe breath again until that man was dead and he had seen the corpse. + +Rewards had been offered, the country had been scoured, but no Injun +Joe was found. One of those omniscient and awe-inspiring marvels, a +detective, came up from St. Louis, moused around, shook his head, +looked wise, and made that sort of astounding success which members of +that craft usually achieve. That is to say, he "found a clew." But you +can't hang a "clew" for murder, and so after that detective had got +through and gone home, Tom felt just as insecure as he was before. + +The slow days drifted on, and each left behind it a slightly lightened +weight of apprehension. + + + +CHAPTER XXV + +THERE comes a time in every rightly-constructed boy's life when he has +a raging desire to go somewhere and dig for hidden treasure. This +desire suddenly came upon Tom one day. He sallied out to find Joe +Harper, but failed of success. Next he sought Ben Rogers; he had gone +fishing. Presently he stumbled upon Huck Finn the Red-Handed. Huck +would answer. Tom took him to a private place and opened the matter to +him confidentially. Huck was willing. Huck was always willing to take a +hand in any enterprise that offered entertainment and required no +capital, for he had a troublesome superabundance of that sort of time +which is not money. "Where'll we dig?" said Huck. + +"Oh, most anywhere." + +"Why, is it hid all around?" + +"No, indeed it ain't. It's hid in mighty particular places, Huck +--sometimes on islands, sometimes in rotten chests under the end of a +limb of an old dead tree, just where the shadow falls at midnight; but +mostly under the floor in ha'nted houses." + +"Who hides it?" + +"Why, robbers, of course--who'd you reckon? Sunday-school +sup'rintendents?" + +"I don't know. If 'twas mine I wouldn't hide it; I'd spend it and have +a good time." + +"So would I. But robbers don't do that way. They always hide it and +leave it there." + +"Don't they come after it any more?" + +"No, they think they will, but they generally forget the marks, or +else they die. Anyway, it lays there a long time and gets rusty; and by +and by somebody finds an old yellow paper that tells how to find the +marks--a paper that's got to be ciphered over about a week because it's +mostly signs and hy'roglyphics." + +"Hyro--which?" + +"Hy'roglyphics--pictures and things, you know, that don't seem to mean +anything." + +"Have you got one of them papers, Tom?" + +"No." + +"Well then, how you going to find the marks?" + +"I don't want any marks. They always bury it under a ha'nted house or +on an island, or under a dead tree that's got one limb sticking out. +Well, we've tried Jackson's Island a little, and we can try it again +some time; and there's the old ha'nted house up the Still-House branch, +and there's lots of dead-limb trees--dead loads of 'em." + +"Is it under all of them?" + +"How you talk! No!" + +"Then how you going to know which one to go for?" 
+ +"Go for all of 'em!" + +"Why, Tom, it'll take all summer." + +"Well, what of that? Suppose you find a brass pot with a hundred +dollars in it, all rusty and gray, or rotten chest full of di'monds. +How's that?" + +Huck's eyes glowed. + +"That's bully. Plenty bully enough for me. Just you gimme the hundred +dollars and I don't want no di'monds." + +"All right. But I bet you I ain't going to throw off on di'monds. Some +of 'em's worth twenty dollars apiece--there ain't any, hardly, but's +worth six bits or a dollar." + +"No! Is that so?" + +"Cert'nly--anybody'll tell you so. Hain't you ever seen one, Huck?" + +"Not as I remember." + +"Oh, kings have slathers of them." + +"Well, I don' know no kings, Tom." + +"I reckon you don't. But if you was to go to Europe you'd see a raft +of 'em hopping around." + +"Do they hop?" + +"Hop?--your granny! No!" + +"Well, what did you say they did, for?" + +"Shucks, I only meant you'd SEE 'em--not hopping, of course--what do +they want to hop for?--but I mean you'd just see 'em--scattered around, +you know, in a kind of a general way. Like that old humpbacked Richard." + +"Richard? What's his other name?" + +"He didn't have any other name. Kings don't have any but a given name." + +"No?" + +"But they don't." + +"Well, if they like it, Tom, all right; but I don't want to be a king +and have only just a given name, like a nigger. But say--where you +going to dig first?" + +"Well, I don't know. S'pose we tackle that old dead-limb tree on the +hill t'other side of Still-House branch?" + +"I'm agreed." + +So they got a crippled pick and a shovel, and set out on their +three-mile tramp. They arrived hot and panting, and threw themselves +down in the shade of a neighboring elm to rest and have a smoke. + +"I like this," said Tom. + +"So do I." + +"Say, Huck, if we find a treasure here, what you going to do with your +share?" + +"Well, I'll have pie and a glass of soda every day, and I'll go to +every circus that comes along. I bet I'll have a gay time." + +"Well, ain't you going to save any of it?" + +"Save it? What for?" + +"Why, so as to have something to live on, by and by." + +"Oh, that ain't any use. Pap would come back to thish-yer town some +day and get his claws on it if I didn't hurry up, and I tell you he'd +clean it out pretty quick. What you going to do with yourn, Tom?" + +"I'm going to buy a new drum, and a sure-'nough sword, and a red +necktie and a bull pup, and get married." + +"Married!" + +"That's it." + +"Tom, you--why, you ain't in your right mind." + +"Wait--you'll see." + +"Well, that's the foolishest thing you could do. Look at pap and my +mother. Fight! Why, they used to fight all the time. I remember, mighty +well." + +"That ain't anything. The girl I'm going to marry won't fight." + +"Tom, I reckon they're all alike. They'll all comb a body. Now you +better think 'bout this awhile. I tell you you better. What's the name +of the gal?" + +"It ain't a gal at all--it's a girl." + +"It's all the same, I reckon; some says gal, some says girl--both's +right, like enough. Anyway, what's her name, Tom?" + +"I'll tell you some time--not now." + +"All right--that'll do. Only if you get married I'll be more lonesomer +than ever." + +"No you won't. You'll come and live with me. Now stir out of this and +we'll go to digging." + +They worked and sweated for half an hour. No result. They toiled +another half-hour. Still no result. Huck said: + +"Do they always bury it as deep as this?" + +"Sometimes--not always. Not generally. 
I reckon we haven't got the +right place." + +So they chose a new spot and began again. The labor dragged a little, +but still they made progress. They pegged away in silence for some +time. Finally Huck leaned on his shovel, swabbed the beaded drops from +his brow with his sleeve, and said: + +"Where you going to dig next, after we get this one?" + +"I reckon maybe we'll tackle the old tree that's over yonder on +Cardiff Hill back of the widow's." + +"I reckon that'll be a good one. But won't the widow take it away from +us, Tom? It's on her land." + +"SHE take it away! Maybe she'd like to try it once. Whoever finds one +of these hid treasures, it belongs to him. It don't make any difference +whose land it's on." + +That was satisfactory. The work went on. By and by Huck said: + +"Blame it, we must be in the wrong place again. What do you think?" + +"It is mighty curious, Huck. I don't understand it. Sometimes witches +interfere. I reckon maybe that's what's the trouble now." + +"Shucks! Witches ain't got no power in the daytime." + +"Well, that's so. I didn't think of that. Oh, I know what the matter +is! What a blamed lot of fools we are! You got to find out where the +shadow of the limb falls at midnight, and that's where you dig!" + +"Then consound it, we've fooled away all this work for nothing. Now +hang it all, we got to come back in the night. It's an awful long way. +Can you get out?" + +"I bet I will. We've got to do it to-night, too, because if somebody +sees these holes they'll know in a minute what's here and they'll go +for it." + +"Well, I'll come around and maow to-night." + +"All right. Let's hide the tools in the bushes." + +The boys were there that night, about the appointed time. They sat in +the shadow waiting. It was a lonely place, and an hour made solemn by +old traditions. Spirits whispered in the rustling leaves, ghosts lurked +in the murky nooks, the deep baying of a hound floated up out of the +distance, an owl answered with his sepulchral note. The boys were +subdued by these solemnities, and talked little. By and by they judged +that twelve had come; they marked where the shadow fell, and began to +dig. Their hopes commenced to rise. Their interest grew stronger, and +their industry kept pace with it. The hole deepened and still deepened, +but every time their hearts jumped to hear the pick strike upon +something, they only suffered a new disappointment. It was only a stone +or a chunk. At last Tom said: + +"It ain't any use, Huck, we're wrong again." + +"Well, but we CAN'T be wrong. We spotted the shadder to a dot." + +"I know it, but then there's another thing." + +"What's that?". + +"Why, we only guessed at the time. Like enough it was too late or too +early." + +Huck dropped his shovel. + +"That's it," said he. "That's the very trouble. We got to give this +one up. We can't ever tell the right time, and besides this kind of +thing's too awful, here this time of night with witches and ghosts +a-fluttering around so. I feel as if something's behind me all the time; +and I'm afeard to turn around, becuz maybe there's others in front +a-waiting for a chance. I been creeping all over, ever since I got here." + +"Well, I've been pretty much so, too, Huck. They most always put in a +dead man when they bury a treasure under a tree, to look out for it." + +"Lordy!" + +"Yes, they do. I've always heard that." + +"Tom, I don't like to fool around much where there's dead people. A +body's bound to get into trouble with 'em, sure." 
+ +"I don't like to stir 'em up, either. S'pose this one here was to +stick his skull out and say something!" + +"Don't Tom! It's awful." + +"Well, it just is. Huck, I don't feel comfortable a bit." + +"Say, Tom, let's give this place up, and try somewheres else." + +"All right, I reckon we better." + +"What'll it be?" + +Tom considered awhile; and then said: + +"The ha'nted house. That's it!" + +"Blame it, I don't like ha'nted houses, Tom. Why, they're a dern sight +worse'n dead people. Dead people might talk, maybe, but they don't come +sliding around in a shroud, when you ain't noticing, and peep over your +shoulder all of a sudden and grit their teeth, the way a ghost does. I +couldn't stand such a thing as that, Tom--nobody could." + +"Yes, but, Huck, ghosts don't travel around only at night. They won't +hender us from digging there in the daytime." + +"Well, that's so. But you know mighty well people don't go about that +ha'nted house in the day nor the night." + +"Well, that's mostly because they don't like to go where a man's been +murdered, anyway--but nothing's ever been seen around that house except +in the night--just some blue lights slipping by the windows--no regular +ghosts." + +"Well, where you see one of them blue lights flickering around, Tom, +you can bet there's a ghost mighty close behind it. It stands to +reason. Becuz you know that they don't anybody but ghosts use 'em." + +"Yes, that's so. But anyway they don't come around in the daytime, so +what's the use of our being afeard?" + +"Well, all right. We'll tackle the ha'nted house if you say so--but I +reckon it's taking chances." + +They had started down the hill by this time. There in the middle of +the moonlit valley below them stood the "ha'nted" house, utterly +isolated, its fences gone long ago, rank weeds smothering the very +doorsteps, the chimney crumbled to ruin, the window-sashes vacant, a +corner of the roof caved in. The boys gazed awhile, half expecting to +see a blue light flit past a window; then talking in a low tone, as +befitted the time and the circumstances, they struck far off to the +right, to give the haunted house a wide berth, and took their way +homeward through the woods that adorned the rearward side of Cardiff +Hill. + + + +CHAPTER XXVI + +ABOUT noon the next day the boys arrived at the dead tree; they had +come for their tools. Tom was impatient to go to the haunted house; +Huck was measurably so, also--but suddenly said: + +"Lookyhere, Tom, do you know what day it is?" + +Tom mentally ran over the days of the week, and then quickly lifted +his eyes with a startled look in them-- + +"My! I never once thought of it, Huck!" + +"Well, I didn't neither, but all at once it popped onto me that it was +Friday." + +"Blame it, a body can't be too careful, Huck. We might 'a' got into an +awful scrape, tackling such a thing on a Friday." + +"MIGHT! Better say we WOULD! There's some lucky days, maybe, but +Friday ain't." + +"Any fool knows that. I don't reckon YOU was the first that found it +out, Huck." + +"Well, I never said I was, did I? And Friday ain't all, neither. I had +a rotten bad dream last night--dreampt about rats." + +"No! Sure sign of trouble. Did they fight?" + +"No." + +"Well, that's good, Huck. When they don't fight it's only a sign that +there's trouble around, you know. All we got to do is to look mighty +sharp and keep out of it. We'll drop this thing for to-day, and play. +Do you know Robin Hood, Huck?" + +"No. Who's Robin Hood?" 
+ +"Why, he was one of the greatest men that was ever in England--and the +best. He was a robber." + +"Cracky, I wisht I was. Who did he rob?" + +"Only sheriffs and bishops and rich people and kings, and such like. +But he never bothered the poor. He loved 'em. He always divided up with +'em perfectly square." + +"Well, he must 'a' been a brick." + +"I bet you he was, Huck. Oh, he was the noblest man that ever was. +They ain't any such men now, I can tell you. He could lick any man in +England, with one hand tied behind him; and he could take his yew bow +and plug a ten-cent piece every time, a mile and a half." + +"What's a YEW bow?" + +"I don't know. It's some kind of a bow, of course. And if he hit that +dime only on the edge he would set down and cry--and curse. But we'll +play Robin Hood--it's nobby fun. I'll learn you." + +"I'm agreed." + +So they played Robin Hood all the afternoon, now and then casting a +yearning eye down upon the haunted house and passing a remark about the +morrow's prospects and possibilities there. As the sun began to sink +into the west they took their way homeward athwart the long shadows of +the trees and soon were buried from sight in the forests of Cardiff +Hill. + +On Saturday, shortly after noon, the boys were at the dead tree again. +They had a smoke and a chat in the shade, and then dug a little in +their last hole, not with great hope, but merely because Tom said there +were so many cases where people had given up a treasure after getting +down within six inches of it, and then somebody else had come along and +turned it up with a single thrust of a shovel. The thing failed this +time, however, so the boys shouldered their tools and went away feeling +that they had not trifled with fortune, but had fulfilled all the +requirements that belong to the business of treasure-hunting. + +When they reached the haunted house there was something so weird and +grisly about the dead silence that reigned there under the baking sun, +and something so depressing about the loneliness and desolation of the +place, that they were afraid, for a moment, to venture in. Then they +crept to the door and took a trembling peep. They saw a weed-grown, +floorless room, unplastered, an ancient fireplace, vacant windows, a +ruinous staircase; and here, there, and everywhere hung ragged and +abandoned cobwebs. They presently entered, softly, with quickened +pulses, talking in whispers, ears alert to catch the slightest sound, +and muscles tense and ready for instant retreat. + +In a little while familiarity modified their fears and they gave the +place a critical and interested examination, rather admiring their own +boldness, and wondering at it, too. Next they wanted to look up-stairs. +This was something like cutting off retreat, but they got to daring +each other, and of course there could be but one result--they threw +their tools into a corner and made the ascent. Up there were the same +signs of decay. In one corner they found a closet that promised +mystery, but the promise was a fraud--there was nothing in it. Their +courage was up now and well in hand. They were about to go down and +begin work when-- + +"Sh!" said Tom. + +"What is it?" whispered Huck, blanching with fright. + +"Sh!... There!... Hear it?" + +"Yes!... Oh, my! Let's run!" + +"Keep still! Don't you budge! They're coming right toward the door." + +The boys stretched themselves upon the floor with their eyes to +knot-holes in the planking, and lay waiting, in a misery of fear. + +"They've stopped.... 
No--coming.... Here they are. Don't whisper +another word, Huck. My goodness, I wish I was out of this!" + +Two men entered. Each boy said to himself: "There's the old deaf and +dumb Spaniard that's been about town once or twice lately--never saw +t'other man before." + +"T'other" was a ragged, unkempt creature, with nothing very pleasant +in his face. The Spaniard was wrapped in a serape; he had bushy white +whiskers; long white hair flowed from under his sombrero, and he wore +green goggles. When they came in, "t'other" was talking in a low voice; +they sat down on the ground, facing the door, with their backs to the +wall, and the speaker continued his remarks. His manner became less +guarded and his words more distinct as he proceeded: + +"No," said he, "I've thought it all over, and I don't like it. It's +dangerous." + +"Dangerous!" grunted the "deaf and dumb" Spaniard--to the vast +surprise of the boys. "Milksop!" + +This voice made the boys gasp and quake. It was Injun Joe's! There was +silence for some time. Then Joe said: + +"What's any more dangerous than that job up yonder--but nothing's come +of it." + +"That's different. Away up the river so, and not another house about. +'Twon't ever be known that we tried, anyway, long as we didn't succeed." + +"Well, what's more dangerous than coming here in the daytime!--anybody +would suspicion us that saw us." + +"I know that. But there warn't any other place as handy after that +fool of a job. I want to quit this shanty. I wanted to yesterday, only +it warn't any use trying to stir out of here, with those infernal boys +playing over there on the hill right in full view." + +"Those infernal boys" quaked again under the inspiration of this +remark, and thought how lucky it was that they had remembered it was +Friday and concluded to wait a day. They wished in their hearts they +had waited a year. + +The two men got out some food and made a luncheon. After a long and +thoughtful silence, Injun Joe said: + +"Look here, lad--you go back up the river where you belong. Wait there +till you hear from me. I'll take the chances on dropping into this town +just once more, for a look. We'll do that 'dangerous' job after I've +spied around a little and think things look well for it. Then for +Texas! We'll leg it together!" + +This was satisfactory. Both men presently fell to yawning, and Injun +Joe said: + +"I'm dead for sleep! It's your turn to watch." + +He curled down in the weeds and soon began to snore. His comrade +stirred him once or twice and he became quiet. Presently the watcher +began to nod; his head drooped lower and lower, both men began to snore +now. + +The boys drew a long, grateful breath. Tom whispered: + +"Now's our chance--come!" + +Huck said: + +"I can't--I'd die if they was to wake." + +Tom urged--Huck held back. At last Tom rose slowly and softly, and +started alone. But the first step he made wrung such a hideous creak +from the crazy floor that he sank down almost dead with fright. He +never made a second attempt. The boys lay there counting the dragging +moments till it seemed to them that time must be done and eternity +growing gray; and then they were grateful to note that at last the sun +was setting. + +Now one snore ceased. Injun Joe sat up, stared around--smiled grimly +upon his comrade, whose head was drooping upon his knees--stirred him +up with his foot and said: + +"Here! YOU'RE a watchman, ain't you! All right, though--nothing's +happened." + +"My! have I been asleep?" + +"Oh, partly, partly. 
Nearly time for us to be moving, pard. What'll we +do with what little swag we've got left?" + +"I don't know--leave it here as we've always done, I reckon. No use to +take it away till we start south. Six hundred and fifty in silver's +something to carry." + +"Well--all right--it won't matter to come here once more." + +"No--but I'd say come in the night as we used to do--it's better." + +"Yes: but look here; it may be a good while before I get the right +chance at that job; accidents might happen; 'tain't in such a very good +place; we'll just regularly bury it--and bury it deep." + +"Good idea," said the comrade, who walked across the room, knelt down, +raised one of the rearward hearth-stones and took out a bag that +jingled pleasantly. He subtracted from it twenty or thirty dollars for +himself and as much for Injun Joe, and passed the bag to the latter, +who was on his knees in the corner, now, digging with his bowie-knife. + +The boys forgot all their fears, all their miseries in an instant. +With gloating eyes they watched every movement. Luck!--the splendor of +it was beyond all imagination! Six hundred dollars was money enough to +make half a dozen boys rich! Here was treasure-hunting under the +happiest auspices--there would not be any bothersome uncertainty as to +where to dig. They nudged each other every moment--eloquent nudges and +easily understood, for they simply meant--"Oh, but ain't you glad NOW +we're here!" + +Joe's knife struck upon something. + +"Hello!" said he. + +"What is it?" said his comrade. + +"Half-rotten plank--no, it's a box, I believe. Here--bear a hand and +we'll see what it's here for. Never mind, I've broke a hole." + +He reached his hand in and drew it out-- + +"Man, it's money!" + +The two men examined the handful of coins. They were gold. The boys +above were as excited as themselves, and as delighted. + +Joe's comrade said: + +"We'll make quick work of this. There's an old rusty pick over amongst +the weeds in the corner the other side of the fireplace--I saw it a +minute ago." + +He ran and brought the boys' pick and shovel. Injun Joe took the pick, +looked it over critically, shook his head, muttered something to +himself, and then began to use it. The box was soon unearthed. It was +not very large; it was iron bound and had been very strong before the +slow years had injured it. The men contemplated the treasure awhile in +blissful silence. + +"Pard, there's thousands of dollars here," said Injun Joe. + +"'Twas always said that Murrel's gang used to be around here one +summer," the stranger observed. + +"I know it," said Injun Joe; "and this looks like it, I should say." + +"Now you won't need to do that job." + +The half-breed frowned. Said he: + +"You don't know me. Least you don't know all about that thing. 'Tain't +robbery altogether--it's REVENGE!" and a wicked light flamed in his +eyes. "I'll need your help in it. When it's finished--then Texas. Go +home to your Nance and your kids, and stand by till you hear from me." + +"Well--if you say so; what'll we do with this--bury it again?" + +"Yes. [Ravishing delight overhead.] NO! by the great Sachem, no! +[Profound distress overhead.] I'd nearly forgot. That pick had fresh +earth on it! [The boys were sick with terror in a moment.] What +business has a pick and a shovel here? What business with fresh earth +on them? Who brought them here--and where are they gone? Have you heard +anybody?--seen anybody? What! bury it again and leave them to come and +see the ground disturbed? 
Not exactly--not exactly. We'll take it to my +den." + +"Why, of course! Might have thought of that before. You mean Number +One?" + +"No--Number Two--under the cross. The other place is bad--too common." + +"All right. It's nearly dark enough to start." + +Injun Joe got up and went about from window to window cautiously +peeping out. Presently he said: + +"Who could have brought those tools here? Do you reckon they can be +up-stairs?" + +The boys' breath forsook them. Injun Joe put his hand on his knife, +halted a moment, undecided, and then turned toward the stairway. The +boys thought of the closet, but their strength was gone. The steps came +creaking up the stairs--the intolerable distress of the situation woke +the stricken resolution of the lads--they were about to spring for the +closet, when there was a crash of rotten timbers and Injun Joe landed +on the ground amid the debris of the ruined stairway. He gathered +himself up cursing, and his comrade said: + +"Now what's the use of all that? If it's anybody, and they're up +there, let them STAY there--who cares? If they want to jump down, now, +and get into trouble, who objects? It will be dark in fifteen minutes +--and then let them follow us if they want to. I'm willing. In my +opinion, whoever hove those things in here caught a sight of us and +took us for ghosts or devils or something. I'll bet they're running +yet." + +Joe grumbled awhile; then he agreed with his friend that what daylight +was left ought to be economized in getting things ready for leaving. +Shortly afterward they slipped out of the house in the deepening +twilight, and moved toward the river with their precious box. + +Tom and Huck rose up, weak but vastly relieved, and stared after them +through the chinks between the logs of the house. Follow? Not they. +They were content to reach ground again without broken necks, and take +the townward track over the hill. They did not talk much. They were too +much absorbed in hating themselves--hating the ill luck that made them +take the spade and the pick there. But for that, Injun Joe never would +have suspected. He would have hidden the silver with the gold to wait +there till his "revenge" was satisfied, and then he would have had the +misfortune to find that money turn up missing. Bitter, bitter luck that +the tools were ever brought there! + +They resolved to keep a lookout for that Spaniard when he should come +to town spying out for chances to do his revengeful job, and follow him +to "Number Two," wherever that might be. Then a ghastly thought +occurred to Tom. + +"Revenge? What if he means US, Huck!" + +"Oh, don't!" said Huck, nearly fainting. + +They talked it all over, and as they entered town they agreed to +believe that he might possibly mean somebody else--at least that he +might at least mean nobody but Tom, since only Tom had testified. + +Very, very small comfort it was to Tom to be alone in danger! Company +would be a palpable improvement, he thought. + + + +CHAPTER XXVII + +THE adventure of the day mightily tormented Tom's dreams that night. +Four times he had his hands on that rich treasure and four times it +wasted to nothingness in his fingers as sleep forsook him and +wakefulness brought back the hard reality of his misfortune. As he lay +in the early morning recalling the incidents of his great adventure, he +noticed that they seemed curiously subdued and far away--somewhat as if +they had happened in another world, or in a time long gone by. 
Then it +occurred to him that the great adventure itself must be a dream! There +was one very strong argument in favor of this idea--namely, that the +quantity of coin he had seen was too vast to be real. He had never seen +as much as fifty dollars in one mass before, and he was like all boys +of his age and station in life, in that he imagined that all references +to "hundreds" and "thousands" were mere fanciful forms of speech, and +that no such sums really existed in the world. He never had supposed +for a moment that so large a sum as a hundred dollars was to be found +in actual money in any one's possession. If his notions of hidden +treasure had been analyzed, they would have been found to consist of a +handful of real dimes and a bushel of vague, splendid, ungraspable +dollars. + +But the incidents of his adventure grew sensibly sharper and clearer +under the attrition of thinking them over, and so he presently found +himself leaning to the impression that the thing might not have been a +dream, after all. This uncertainty must be swept away. He would snatch +a hurried breakfast and go and find Huck. Huck was sitting on the +gunwale of a flatboat, listlessly dangling his feet in the water and +looking very melancholy. Tom concluded to let Huck lead up to the +subject. If he did not do it, then the adventure would be proved to +have been only a dream. + +"Hello, Huck!" + +"Hello, yourself." + +Silence, for a minute. + +"Tom, if we'd 'a' left the blame tools at the dead tree, we'd 'a' got +the money. Oh, ain't it awful!" + +"'Tain't a dream, then, 'tain't a dream! Somehow I most wish it was. +Dog'd if I don't, Huck." + +"What ain't a dream?" + +"Oh, that thing yesterday. I been half thinking it was." + +"Dream! If them stairs hadn't broke down you'd 'a' seen how much dream +it was! I've had dreams enough all night--with that patch-eyed Spanish +devil going for me all through 'em--rot him!" + +"No, not rot him. FIND him! Track the money!" + +"Tom, we'll never find him. A feller don't have only one chance for +such a pile--and that one's lost. I'd feel mighty shaky if I was to see +him, anyway." + +"Well, so'd I; but I'd like to see him, anyway--and track him out--to +his Number Two." + +"Number Two--yes, that's it. I been thinking 'bout that. But I can't +make nothing out of it. What do you reckon it is?" + +"I dono. It's too deep. Say, Huck--maybe it's the number of a house!" + +"Goody!... No, Tom, that ain't it. If it is, it ain't in this +one-horse town. They ain't no numbers here." + +"Well, that's so. Lemme think a minute. Here--it's the number of a +room--in a tavern, you know!" + +"Oh, that's the trick! They ain't only two taverns. We can find out +quick." + +"You stay here, Huck, till I come." + +Tom was off at once. He did not care to have Huck's company in public +places. He was gone half an hour. He found that in the best tavern, No. +2 had long been occupied by a young lawyer, and was still so occupied. +In the less ostentatious house, No. 2 was a mystery. The +tavern-keeper's young son said it was kept locked all the time, and he +never saw anybody go into it or come out of it except at night; he did +not know any particular reason for this state of things; had had some +little curiosity, but it was rather feeble; had made the most of the +mystery by entertaining himself with the idea that that room was +"ha'nted"; had noticed that there was a light in there the night before. + +"That's what I've found out, Huck. I reckon that's the very No. 2 +we're after." 
+ +"I reckon it is, Tom. Now what you going to do?" + +"Lemme think." + +Tom thought a long time. Then he said: + +"I'll tell you. The back door of that No. 2 is the door that comes out +into that little close alley between the tavern and the old rattle trap +of a brick store. Now you get hold of all the door-keys you can find, +and I'll nip all of auntie's, and the first dark night we'll go there +and try 'em. And mind you, keep a lookout for Injun Joe, because he +said he was going to drop into town and spy around once more for a +chance to get his revenge. If you see him, you just follow him; and if +he don't go to that No. 2, that ain't the place." + +"Lordy, I don't want to foller him by myself!" + +"Why, it'll be night, sure. He mightn't ever see you--and if he did, +maybe he'd never think anything." + +"Well, if it's pretty dark I reckon I'll track him. I dono--I dono. +I'll try." + +"You bet I'll follow him, if it's dark, Huck. Why, he might 'a' found +out he couldn't get his revenge, and be going right after that money." + +"It's so, Tom, it's so. I'll foller him; I will, by jingoes!" + +"Now you're TALKING! Don't you ever weaken, Huck, and I won't." + + + +CHAPTER XXVIII + +THAT night Tom and Huck were ready for their adventure. They hung +about the neighborhood of the tavern until after nine, one watching the +alley at a distance and the other the tavern door. Nobody entered the +alley or left it; nobody resembling the Spaniard entered or left the +tavern door. The night promised to be a fair one; so Tom went home with +the understanding that if a considerable degree of darkness came on, +Huck was to come and "maow," whereupon he would slip out and try the +keys. But the night remained clear, and Huck closed his watch and +retired to bed in an empty sugar hogshead about twelve. + +Tuesday the boys had the same ill luck. Also Wednesday. But Thursday +night promised better. Tom slipped out in good season with his aunt's +old tin lantern, and a large towel to blindfold it with. He hid the +lantern in Huck's sugar hogshead and the watch began. An hour before +midnight the tavern closed up and its lights (the only ones +thereabouts) were put out. No Spaniard had been seen. Nobody had +entered or left the alley. Everything was auspicious. The blackness of +darkness reigned, the perfect stillness was interrupted only by +occasional mutterings of distant thunder. + +Tom got his lantern, lit it in the hogshead, wrapped it closely in the +towel, and the two adventurers crept in the gloom toward the tavern. +Huck stood sentry and Tom felt his way into the alley. Then there was a +season of waiting anxiety that weighed upon Huck's spirits like a +mountain. He began to wish he could see a flash from the lantern--it +would frighten him, but it would at least tell him that Tom was alive +yet. It seemed hours since Tom had disappeared. Surely he must have +fainted; maybe he was dead; maybe his heart had burst under terror and +excitement. In his uneasiness Huck found himself drawing closer and +closer to the alley; fearing all sorts of dreadful things, and +momentarily expecting some catastrophe to happen that would take away +his breath. There was not much to take away, for he seemed only able to +inhale it by thimblefuls, and his heart would soon wear itself out, the +way it was beating. Suddenly there was a flash of light and Tom came +tearing by him: "Run!" said he; "run, for your life!" 
+ +He needn't have repeated it; once was enough; Huck was making thirty +or forty miles an hour before the repetition was uttered. The boys +never stopped till they reached the shed of a deserted slaughter-house +at the lower end of the village. Just as they got within its shelter +the storm burst and the rain poured down. As soon as Tom got his breath +he said: + +"Huck, it was awful! I tried two of the keys, just as soft as I could; +but they seemed to make such a power of racket that I couldn't hardly +get my breath I was so scared. They wouldn't turn in the lock, either. +Well, without noticing what I was doing, I took hold of the knob, and +open comes the door! It warn't locked! I hopped in, and shook off the +towel, and, GREAT CAESAR'S GHOST!" + +"What!--what'd you see, Tom?" + +"Huck, I most stepped onto Injun Joe's hand!" + +"No!" + +"Yes! He was lying there, sound asleep on the floor, with his old +patch on his eye and his arms spread out." + +"Lordy, what did you do? Did he wake up?" + +"No, never budged. Drunk, I reckon. I just grabbed that towel and +started!" + +"I'd never 'a' thought of the towel, I bet!" + +"Well, I would. My aunt would make me mighty sick if I lost it." + +"Say, Tom, did you see that box?" + +"Huck, I didn't wait to look around. I didn't see the box, I didn't +see the cross. I didn't see anything but a bottle and a tin cup on the +floor by Injun Joe; yes, I saw two barrels and lots more bottles in the +room. Don't you see, now, what's the matter with that ha'nted room?" + +"How?" + +"Why, it's ha'nted with whiskey! Maybe ALL the Temperance Taverns have +got a ha'nted room, hey, Huck?" + +"Well, I reckon maybe that's so. Who'd 'a' thought such a thing? But +say, Tom, now's a mighty good time to get that box, if Injun Joe's +drunk." + +"It is, that! You try it!" + +Huck shuddered. + +"Well, no--I reckon not." + +"And I reckon not, Huck. Only one bottle alongside of Injun Joe ain't +enough. If there'd been three, he'd be drunk enough and I'd do it." + +There was a long pause for reflection, and then Tom said: + +"Lookyhere, Huck, less not try that thing any more till we know Injun +Joe's not in there. It's too scary. Now, if we watch every night, we'll +be dead sure to see him go out, some time or other, and then we'll +snatch that box quicker'n lightning." + +"Well, I'm agreed. I'll watch the whole night long, and I'll do it +every night, too, if you'll do the other part of the job." + +"All right, I will. All you got to do is to trot up Hooper Street a +block and maow--and if I'm asleep, you throw some gravel at the window +and that'll fetch me." + +"Agreed, and good as wheat!" + +"Now, Huck, the storm's over, and I'll go home. It'll begin to be +daylight in a couple of hours. You go back and watch that long, will +you?" + +"I said I would, Tom, and I will. I'll ha'nt that tavern every night +for a year! I'll sleep all day and I'll stand watch all night." + +"That's all right. Now, where you going to sleep?" + +"In Ben Rogers' hayloft. He lets me, and so does his pap's nigger man, +Uncle Jake. I tote water for Uncle Jake whenever he wants me to, and +any time I ask him he gives me a little something to eat if he can +spare it. That's a mighty good nigger, Tom. He likes me, becuz I don't +ever act as if I was above him. Sometime I've set right down and eat +WITH him. But you needn't tell that. A body's got to do things when +he's awful hungry he wouldn't want to do as a steady thing." + +"Well, if I don't want you in the daytime, I'll let you sleep. 
I won't
+come bothering around. Any time you see something's up, in the night,
+just skip right around and maow."
+
+
+
+CHAPTER XXIX
+
+THE first thing Tom heard on Friday morning was a glad piece of news
+--Judge Thatcher's family had come back to town the night before. Both
+Injun Joe and the treasure sunk into secondary importance for a moment,
+and Becky took the chief place in the boy's interest. He saw her and
+they had an exhausting good time playing "hi-spy" and "gully-keeper"
+with a crowd of their school-mates. The day was completed and crowned
+in a peculiarly satisfactory way: Becky teased her mother to appoint
+the next day for the long-promised and long-delayed picnic, and she
+consented. The child's delight was boundless; and Tom's not more
+moderate. The invitations were sent out before sunset, and straightway
+the young folks of the village were thrown into a fever of preparation
+and pleasurable anticipation. Tom's excitement enabled him to keep
+awake until a pretty late hour, and he had good hopes of hearing Huck's
+"maow," and of having his treasure to astonish Becky and the picnickers
+with, next day; but he was disappointed. No signal came that night.
+
+Morning came, eventually, and by ten or eleven o'clock a giddy and
+rollicking company were gathered at Judge Thatcher's, and everything
+was ready for a start. It was not the custom for elderly people to mar
+the picnics with their presence. The children were considered safe
+enough under the wings of a few young ladies of eighteen and a few
+young gentlemen of twenty-three or thereabouts. The old steam ferryboat
+was chartered for the occasion; presently the gay throng filed up the
+main street laden with provision-baskets. Sid was sick and had to miss
+the fun; Mary remained at home to entertain him. The last thing Mrs.
+Thatcher said to Becky, was:
+
+"You'll not get back till late. Perhaps you'd better stay all night
+with some of the girls that live near the ferry-landing, child."
+
+"Then I'll stay with Susy Harper, mamma."
+
+"Very well. And mind and behave yourself and don't be any trouble."
+
+Presently, as they tripped along, Tom said to Becky:
+
+"Say--I'll tell you what we'll do. 'Stead of going to Joe Harper's
+we'll climb right up the hill and stop at the Widow Douglas'. She'll
+have ice-cream! She has it most every day--dead loads of it. And she'll
+be awful glad to have us."
+
+"Oh, that will be fun!"
+
+Then Becky reflected a moment and said:
+
+"But what will mamma say?"
+
+"How'll she ever know?"
+
+The girl turned the idea over in her mind, and said reluctantly:
+
+"I reckon it's wrong--but--"
+
+"But shucks! Your mother won't know, and so what's the harm? All she
+wants is that you'll be safe; and I bet you she'd 'a' said go there if
+she'd 'a' thought of it. I know she would!"
+
+The Widow Douglas' splendid hospitality was a tempting bait. It and
+Tom's persuasions presently carried the day. So it was decided to say
+nothing to anybody about the night's programme. Presently it occurred to
+Tom that maybe Huck might come this very night and give the signal. The
+thought took a deal of the spirit out of his anticipations. Still he
+could not bear to give up the fun at Widow Douglas'. And why should he
+give it up, he reasoned--the signal did not come the night before, so
+why should it be any more likely to come to-night? 
The sure fun of the +evening outweighed the uncertain treasure; and, boy-like, he determined +to yield to the stronger inclination and not allow himself to think of +the box of money another time that day. + +Three miles below town the ferryboat stopped at the mouth of a woody +hollow and tied up. The crowd swarmed ashore and soon the forest +distances and craggy heights echoed far and near with shoutings and +laughter. All the different ways of getting hot and tired were gone +through with, and by-and-by the rovers straggled back to camp fortified +with responsible appetites, and then the destruction of the good things +began. After the feast there was a refreshing season of rest and chat +in the shade of spreading oaks. By-and-by somebody shouted: + +"Who's ready for the cave?" + +Everybody was. Bundles of candles were procured, and straightway there +was a general scamper up the hill. The mouth of the cave was up the +hillside--an opening shaped like a letter A. Its massive oaken door +stood unbarred. Within was a small chamber, chilly as an ice-house, and +walled by Nature with solid limestone that was dewy with a cold sweat. +It was romantic and mysterious to stand here in the deep gloom and look +out upon the green valley shining in the sun. But the impressiveness of +the situation quickly wore off, and the romping began again. The moment +a candle was lighted there was a general rush upon the owner of it; a +struggle and a gallant defence followed, but the candle was soon +knocked down or blown out, and then there was a glad clamor of laughter +and a new chase. But all things have an end. By-and-by the procession +went filing down the steep descent of the main avenue, the flickering +rank of lights dimly revealing the lofty walls of rock almost to their +point of junction sixty feet overhead. This main avenue was not more +than eight or ten feet wide. Every few steps other lofty and still +narrower crevices branched from it on either hand--for McDougal's cave +was but a vast labyrinth of crooked aisles that ran into each other and +out again and led nowhere. It was said that one might wander days and +nights together through its intricate tangle of rifts and chasms, and +never find the end of the cave; and that he might go down, and down, +and still down, into the earth, and it was just the same--labyrinth +under labyrinth, and no end to any of them. No man "knew" the cave. +That was an impossible thing. Most of the young men knew a portion of +it, and it was not customary to venture much beyond this known portion. +Tom Sawyer knew as much of the cave as any one. + +The procession moved along the main avenue some three-quarters of a +mile, and then groups and couples began to slip aside into branch +avenues, fly along the dismal corridors, and take each other by +surprise at points where the corridors joined again. Parties were able +to elude each other for the space of half an hour without going beyond +the "known" ground. + +By-and-by, one group after another came straggling back to the mouth +of the cave, panting, hilarious, smeared from head to foot with tallow +drippings, daubed with clay, and entirely delighted with the success of +the day. Then they were astonished to find that they had been taking no +note of time and that night was about at hand. The clanging bell had +been calling for half an hour. However, this sort of close to the day's +adventures was romantic and therefore satisfactory. 
When the ferryboat +with her wild freight pushed into the stream, nobody cared sixpence for +the wasted time but the captain of the craft. + +Huck was already upon his watch when the ferryboat's lights went +glinting past the wharf. He heard no noise on board, for the young +people were as subdued and still as people usually are who are nearly +tired to death. He wondered what boat it was, and why she did not stop +at the wharf--and then he dropped her out of his mind and put his +attention upon his business. The night was growing cloudy and dark. Ten +o'clock came, and the noise of vehicles ceased, scattered lights began +to wink out, all straggling foot-passengers disappeared, the village +betook itself to its slumbers and left the small watcher alone with the +silence and the ghosts. Eleven o'clock came, and the tavern lights were +put out; darkness everywhere, now. Huck waited what seemed a weary long +time, but nothing happened. His faith was weakening. Was there any use? +Was there really any use? Why not give it up and turn in? + +A noise fell upon his ear. He was all attention in an instant. The +alley door closed softly. He sprang to the corner of the brick store. +The next moment two men brushed by him, and one seemed to have +something under his arm. It must be that box! So they were going to +remove the treasure. Why call Tom now? It would be absurd--the men +would get away with the box and never be found again. No, he would +stick to their wake and follow them; he would trust to the darkness for +security from discovery. So communing with himself, Huck stepped out +and glided along behind the men, cat-like, with bare feet, allowing +them to keep just far enough ahead not to be invisible. + +They moved up the river street three blocks, then turned to the left +up a cross-street. They went straight ahead, then, until they came to +the path that led up Cardiff Hill; this they took. They passed by the +old Welshman's house, half-way up the hill, without hesitating, and +still climbed upward. Good, thought Huck, they will bury it in the old +quarry. But they never stopped at the quarry. They passed on, up the +summit. They plunged into the narrow path between the tall sumach +bushes, and were at once hidden in the gloom. Huck closed up and +shortened his distance, now, for they would never be able to see him. +He trotted along awhile; then slackened his pace, fearing he was +gaining too fast; moved on a piece, then stopped altogether; listened; +no sound; none, save that he seemed to hear the beating of his own +heart. The hooting of an owl came over the hill--ominous sound! But no +footsteps. Heavens, was everything lost! He was about to spring with +winged feet, when a man cleared his throat not four feet from him! +Huck's heart shot into his throat, but he swallowed it again; and then +he stood there shaking as if a dozen agues had taken charge of him at +once, and so weak that he thought he must surely fall to the ground. He +knew where he was. He knew he was within five steps of the stile +leading into Widow Douglas' grounds. Very well, he thought, let them +bury it there; it won't be hard to find. + +Now there was a voice--a very low voice--Injun Joe's: + +"Damn her, maybe she's got company--there's lights, late as it is." + +"I can't see any." + +This was that stranger's voice--the stranger of the haunted house. A +deadly chill went to Huck's heart--this, then, was the "revenge" job! +His thought was, to fly. 
Then he remembered that the Widow Douglas had +been kind to him more than once, and maybe these men were going to +murder her. He wished he dared venture to warn her; but he knew he +didn't dare--they might come and catch him. He thought all this and +more in the moment that elapsed between the stranger's remark and Injun +Joe's next--which was-- + +"Because the bush is in your way. Now--this way--now you see, don't +you?" + +"Yes. Well, there IS company there, I reckon. Better give it up." + +"Give it up, and I just leaving this country forever! Give it up and +maybe never have another chance. I tell you again, as I've told you +before, I don't care for her swag--you may have it. But her husband was +rough on me--many times he was rough on me--and mainly he was the +justice of the peace that jugged me for a vagrant. And that ain't all. +It ain't a millionth part of it! He had me HORSEWHIPPED!--horsewhipped +in front of the jail, like a nigger!--with all the town looking on! +HORSEWHIPPED!--do you understand? He took advantage of me and died. But +I'll take it out of HER." + +"Oh, don't kill her! Don't do that!" + +"Kill? Who said anything about killing? I would kill HIM if he was +here; but not her. When you want to get revenge on a woman you don't +kill her--bosh! you go for her looks. You slit her nostrils--you notch +her ears like a sow!" + +"By God, that's--" + +"Keep your opinion to yourself! It will be safest for you. I'll tie +her to the bed. If she bleeds to death, is that my fault? I'll not cry, +if she does. My friend, you'll help me in this thing--for MY sake +--that's why you're here--I mightn't be able alone. If you flinch, I'll +kill you. Do you understand that? And if I have to kill you, I'll kill +her--and then I reckon nobody'll ever know much about who done this +business." + +"Well, if it's got to be done, let's get at it. The quicker the +better--I'm all in a shiver." + +"Do it NOW? And company there? Look here--I'll get suspicious of you, +first thing you know. No--we'll wait till the lights are out--there's +no hurry." + +Huck felt that a silence was going to ensue--a thing still more awful +than any amount of murderous talk; so he held his breath and stepped +gingerly back; planted his foot carefully and firmly, after balancing, +one-legged, in a precarious way and almost toppling over, first on one +side and then on the other. He took another step back, with the same +elaboration and the same risks; then another and another, and--a twig +snapped under his foot! His breath stopped and he listened. There was +no sound--the stillness was perfect. His gratitude was measureless. Now +he turned in his tracks, between the walls of sumach bushes--turned +himself as carefully as if he were a ship--and then stepped quickly but +cautiously along. When he emerged at the quarry he felt secure, and so +he picked up his nimble heels and flew. Down, down he sped, till he +reached the Welshman's. He banged at the door, and presently the heads +of the old man and his two stalwart sons were thrust from windows. + +"What's the row there? Who's banging? What do you want?" + +"Let me in--quick! I'll tell everything." + +"Why, who are you?" + +"Huckleberry Finn--quick, let me in!" + +"Huckleberry Finn, indeed! It ain't a name to open many doors, I +judge! But let him in, lads, and let's see what's the trouble." + +"Please don't ever tell I told you," were Huck's first words when he +got in. 
"Please don't--I'd be killed, sure--but the widow's been good +friends to me sometimes, and I want to tell--I WILL tell if you'll +promise you won't ever say it was me." + +"By George, he HAS got something to tell, or he wouldn't act so!" +exclaimed the old man; "out with it and nobody here'll ever tell, lad." + +Three minutes later the old man and his sons, well armed, were up the +hill, and just entering the sumach path on tiptoe, their weapons in +their hands. Huck accompanied them no further. He hid behind a great +bowlder and fell to listening. There was a lagging, anxious silence, +and then all of a sudden there was an explosion of firearms and a cry. + +Huck waited for no particulars. He sprang away and sped down the hill +as fast as his legs could carry him. + + + +CHAPTER XXX + +AS the earliest suspicion of dawn appeared on Sunday morning, Huck +came groping up the hill and rapped gently at the old Welshman's door. +The inmates were asleep, but it was a sleep that was set on a +hair-trigger, on account of the exciting episode of the night. A call +came from a window: + +"Who's there!" + +Huck's scared voice answered in a low tone: + +"Please let me in! It's only Huck Finn!" + +"It's a name that can open this door night or day, lad!--and welcome!" + +These were strange words to the vagabond boy's ears, and the +pleasantest he had ever heard. He could not recollect that the closing +word had ever been applied in his case before. The door was quickly +unlocked, and he entered. Huck was given a seat and the old man and his +brace of tall sons speedily dressed themselves. + +"Now, my boy, I hope you're good and hungry, because breakfast will be +ready as soon as the sun's up, and we'll have a piping hot one, too +--make yourself easy about that! I and the boys hoped you'd turn up and +stop here last night." + +"I was awful scared," said Huck, "and I run. I took out when the +pistols went off, and I didn't stop for three mile. I've come now becuz +I wanted to know about it, you know; and I come before daylight becuz I +didn't want to run across them devils, even if they was dead." + +"Well, poor chap, you do look as if you'd had a hard night of it--but +there's a bed here for you when you've had your breakfast. No, they +ain't dead, lad--we are sorry enough for that. You see we knew right +where to put our hands on them, by your description; so we crept along +on tiptoe till we got within fifteen feet of them--dark as a cellar +that sumach path was--and just then I found I was going to sneeze. It +was the meanest kind of luck! I tried to keep it back, but no use +--'twas bound to come, and it did come! I was in the lead with my pistol +raised, and when the sneeze started those scoundrels a-rustling to get +out of the path, I sung out, 'Fire boys!' and blazed away at the place +where the rustling was. So did the boys. But they were off in a jiffy, +those villains, and we after them, down through the woods. I judge we +never touched them. They fired a shot apiece as they started, but their +bullets whizzed by and didn't do us any harm. As soon as we lost the +sound of their feet we quit chasing, and went down and stirred up the +constables. They got a posse together, and went off to guard the river +bank, and as soon as it is light the sheriff and a gang are going to +beat up the woods. My boys will be with them presently. I wish we had +some sort of description of those rascals--'twould help a good deal. +But you couldn't see what they were like, in the dark, lad, I suppose?" 
+ +"Oh yes; I saw them down-town and follered them." + +"Splendid! Describe them--describe them, my boy!" + +"One's the old deaf and dumb Spaniard that's ben around here once or +twice, and t'other's a mean-looking, ragged--" + +"That's enough, lad, we know the men! Happened on them in the woods +back of the widow's one day, and they slunk away. Off with you, boys, +and tell the sheriff--get your breakfast to-morrow morning!" + +The Welshman's sons departed at once. As they were leaving the room +Huck sprang up and exclaimed: + +"Oh, please don't tell ANYbody it was me that blowed on them! Oh, +please!" + +"All right if you say it, Huck, but you ought to have the credit of +what you did." + +"Oh no, no! Please don't tell!" + +When the young men were gone, the old Welshman said: + +"They won't tell--and I won't. But why don't you want it known?" + +Huck would not explain, further than to say that he already knew too +much about one of those men and would not have the man know that he +knew anything against him for the whole world--he would be killed for +knowing it, sure. + +The old man promised secrecy once more, and said: + +"How did you come to follow these fellows, lad? Were they looking +suspicious?" + +Huck was silent while he framed a duly cautious reply. Then he said: + +"Well, you see, I'm a kind of a hard lot,--least everybody says so, +and I don't see nothing agin it--and sometimes I can't sleep much, on +account of thinking about it and sort of trying to strike out a new way +of doing. That was the way of it last night. I couldn't sleep, and so I +come along up-street 'bout midnight, a-turning it all over, and when I +got to that old shackly brick store by the Temperance Tavern, I backed +up agin the wall to have another think. Well, just then along comes +these two chaps slipping along close by me, with something under their +arm, and I reckoned they'd stole it. One was a-smoking, and t'other one +wanted a light; so they stopped right before me and the cigars lit up +their faces and I see that the big one was the deaf and dumb Spaniard, +by his white whiskers and the patch on his eye, and t'other one was a +rusty, ragged-looking devil." + +"Could you see the rags by the light of the cigars?" + +This staggered Huck for a moment. Then he said: + +"Well, I don't know--but somehow it seems as if I did." + +"Then they went on, and you--" + +"Follered 'em--yes. That was it. I wanted to see what was up--they +sneaked along so. I dogged 'em to the widder's stile, and stood in the +dark and heard the ragged one beg for the widder, and the Spaniard +swear he'd spile her looks just as I told you and your two--" + +"What! The DEAF AND DUMB man said all that!" + +Huck had made another terrible mistake! He was trying his best to keep +the old man from getting the faintest hint of who the Spaniard might +be, and yet his tongue seemed determined to get him into trouble in +spite of all he could do. He made several efforts to creep out of his +scrape, but the old man's eye was upon him and he made blunder after +blunder. Presently the Welshman said: + +"My boy, don't be afraid of me. I wouldn't hurt a hair of your head +for all the world. No--I'd protect you--I'd protect you. This Spaniard +is not deaf and dumb; you've let that slip without intending it; you +can't cover that up now. You know something about that Spaniard that +you want to keep dark. Now trust me--tell me what it is, and trust me +--I won't betray you." 
+
+Huck looked into the old man's honest eyes a moment, then bent over
+and whispered in his ear:
+
+"'Tain't a Spaniard--it's Injun Joe!"
+
+The Welshman almost jumped out of his chair. In a moment he said:
+
+"It's all plain enough, now. When you talked about notching ears and
+slitting noses I judged that that was your own embellishment, because
+white men don't take that sort of revenge. But an Injun! That's a
+different matter altogether."
+
+During breakfast the talk went on, and in the course of it the old man
+said that the last thing which he and his sons had done, before going
+to bed, was to get a lantern and examine the stile and its vicinity for
+marks of blood. They found none, but captured a bulky bundle of--
+
+"Of WHAT?"
+
+If the words had been lightning they could not have leaped with a more
+stunning suddenness from Huck's blanched lips. His eyes were staring
+wide, now, and his breath suspended--waiting for the answer. The
+Welshman started--stared in return--three seconds--five seconds--ten
+--then replied:
+
+"Of burglar's tools. Why, what's the MATTER with you?"
+
+Huck sank back, panting gently, but deeply, unutterably grateful. The
+Welshman eyed him gravely, curiously--and presently said:
+
+"Yes, burglar's tools. That appears to relieve you a good deal. But
+what did give you that turn? What were YOU expecting we'd found?"
+
+Huck was in a close place--the inquiring eye was upon him--he would
+have given anything for material for a plausible answer--nothing
+suggested itself--the inquiring eye was boring deeper and deeper--a
+senseless reply offered--there was no time to weigh it, so at a venture
+he uttered it--feebly:
+
+"Sunday-school books, maybe."
+
+Poor Huck was too distressed to smile, but the old man laughed loud
+and joyously, shook up the details of his anatomy from head to foot,
+and ended by saying that such a laugh was money in a man's pocket,
+because it cut down the doctor's bill like everything. Then he added:
+
+"Poor old chap, you're white and jaded--you ain't well a bit--no
+wonder you're a little flighty and off your balance. But you'll come
+out of it. Rest and sleep will fetch you out all right, I hope."
+
+Huck was irritated to think he had been such a goose and betrayed such
+a suspicious excitement, for he had dropped the idea that the parcel
+brought from the tavern was the treasure, as soon as he had heard the
+talk at the widow's stile. He had only thought it was not the treasure,
+however--he had not known that it wasn't--and so the suggestion of a
+captured bundle was too much for his self-possession. But on the whole
+he felt glad the little episode had happened, for now he knew beyond
+all question that that bundle was not THE bundle, and so his mind was
+at rest and exceedingly comfortable. In fact, everything seemed to be
+drifting just in the right direction, now; the treasure must be still
+in No. 2, the men would be captured and jailed that day, and he and Tom
+could seize the gold that night without any trouble or any fear of
+interruption.
+
+Just as breakfast was completed there was a knock at the door. Huck
+jumped for a hiding-place, for he had no mind to be connected even
+remotely with the late event. The Welshman admitted several ladies and
+gentlemen, among them the Widow Douglas, and noticed that groups of
+citizens were climbing up the hill--to stare at the stile. So the news
+had spread. The Welshman had to tell the story of the night to the
+visitors. The widow's gratitude for her preservation was outspoken. 
+ +"Don't say a word about it, madam. There's another that you're more +beholden to than you are to me and my boys, maybe, but he don't allow +me to tell his name. We wouldn't have been there but for him." + +Of course this excited a curiosity so vast that it almost belittled +the main matter--but the Welshman allowed it to eat into the vitals of +his visitors, and through them be transmitted to the whole town, for he +refused to part with his secret. When all else had been learned, the +widow said: + +"I went to sleep reading in bed and slept straight through all that +noise. Why didn't you come and wake me?" + +"We judged it warn't worth while. Those fellows warn't likely to come +again--they hadn't any tools left to work with, and what was the use of +waking you up and scaring you to death? My three negro men stood guard +at your house all the rest of the night. They've just come back." + +More visitors came, and the story had to be told and retold for a +couple of hours more. + +There was no Sabbath-school during day-school vacation, but everybody +was early at church. The stirring event was well canvassed. News came +that not a sign of the two villains had been yet discovered. When the +sermon was finished, Judge Thatcher's wife dropped alongside of Mrs. +Harper as she moved down the aisle with the crowd and said: + +"Is my Becky going to sleep all day? I just expected she would be +tired to death." + +"Your Becky?" + +"Yes," with a startled look--"didn't she stay with you last night?" + +"Why, no." + +Mrs. Thatcher turned pale, and sank into a pew, just as Aunt Polly, +talking briskly with a friend, passed by. Aunt Polly said: + +"Good-morning, Mrs. Thatcher. Good-morning, Mrs. Harper. I've got a +boy that's turned up missing. I reckon my Tom stayed at your house last +night--one of you. And now he's afraid to come to church. I've got to +settle with him." + +Mrs. Thatcher shook her head feebly and turned paler than ever. + +"He didn't stay with us," said Mrs. Harper, beginning to look uneasy. +A marked anxiety came into Aunt Polly's face. + +"Joe Harper, have you seen my Tom this morning?" + +"No'm." + +"When did you see him last?" + +Joe tried to remember, but was not sure he could say. The people had +stopped moving out of church. Whispers passed along, and a boding +uneasiness took possession of every countenance. Children were +anxiously questioned, and young teachers. They all said they had not +noticed whether Tom and Becky were on board the ferryboat on the +homeward trip; it was dark; no one thought of inquiring if any one was +missing. One young man finally blurted out his fear that they were +still in the cave! Mrs. Thatcher swooned away. Aunt Polly fell to +crying and wringing her hands. + +The alarm swept from lip to lip, from group to group, from street to +street, and within five minutes the bells were wildly clanging and the +whole town was up! The Cardiff Hill episode sank into instant +insignificance, the burglars were forgotten, horses were saddled, +skiffs were manned, the ferryboat ordered out, and before the horror +was half an hour old, two hundred men were pouring down highroad and +river toward the cave. + +All the long afternoon the village seemed empty and dead. Many women +visited Aunt Polly and Mrs. Thatcher and tried to comfort them. They +cried with them, too, and that was still better than words. All the +tedious night the town waited for news; but when the morning dawned at +last, all the word that came was, "Send more candles--and send food." +Mrs. 
Thatcher was almost crazed; and Aunt Polly, also. Judge Thatcher +sent messages of hope and encouragement from the cave, but they +conveyed no real cheer. + +The old Welshman came home toward daylight, spattered with +candle-grease, smeared with clay, and almost worn out. He found Huck +still in the bed that had been provided for him, and delirious with +fever. The physicians were all at the cave, so the Widow Douglas came +and took charge of the patient. She said she would do her best by him, +because, whether he was good, bad, or indifferent, he was the Lord's, +and nothing that was the Lord's was a thing to be neglected. The +Welshman said Huck had good spots in him, and the widow said: + +"You can depend on it. That's the Lord's mark. He don't leave it off. +He never does. Puts it somewhere on every creature that comes from his +hands." + +Early in the forenoon parties of jaded men began to straggle into the +village, but the strongest of the citizens continued searching. All the +news that could be gained was that remotenesses of the cavern were +being ransacked that had never been visited before; that every corner +and crevice was going to be thoroughly searched; that wherever one +wandered through the maze of passages, lights were to be seen flitting +hither and thither in the distance, and shoutings and pistol-shots sent +their hollow reverberations to the ear down the sombre aisles. In one +place, far from the section usually traversed by tourists, the names +"BECKY & TOM" had been found traced upon the rocky wall with +candle-smoke, and near at hand a grease-soiled bit of ribbon. Mrs. +Thatcher recognized the ribbon and cried over it. She said it was the +last relic she should ever have of her child; and that no other memorial +of her could ever be so precious, because this one parted latest from +the living body before the awful death came. Some said that now and +then, in the cave, a far-away speck of light would glimmer, and then a +glorious shout would burst forth and a score of men go trooping down the +echoing aisle--and then a sickening disappointment always followed; the +children were not there; it was only a searcher's light. + +Three dreadful days and nights dragged their tedious hours along, and +the village sank into a hopeless stupor. No one had heart for anything. +The accidental discovery, just made, that the proprietor of the +Temperance Tavern kept liquor on his premises, scarcely fluttered the +public pulse, tremendous as the fact was. In a lucid interval, Huck +feebly led up to the subject of taverns, and finally asked--dimly +dreading the worst--if anything had been discovered at the Temperance +Tavern since he had been ill. + +"Yes," said the widow. + +Huck started up in bed, wild-eyed: + +"What? What was it?" + +"Liquor!--and the place has been shut up. Lie down, child--what a turn +you did give me!" + +"Only tell me just one thing--only just one--please! Was it Tom Sawyer +that found it?" + +The widow burst into tears. "Hush, hush, child, hush! I've told you +before, you must NOT talk. You are very, very sick!" + +Then nothing but liquor had been found; there would have been a great +powwow if it had been the gold. So the treasure was gone forever--gone +forever! But what could she be crying about? Curious that she should +cry. + +These thoughts worked their dim way through Huck's mind, and under the +weariness they gave him he fell asleep. The widow said to herself: + +"There--he's asleep, poor wreck. Tom Sawyer find it! 
Pity but somebody +could find Tom Sawyer! Ah, there ain't many left, now, that's got hope +enough, or strength enough, either, to go on searching." + + + +CHAPTER XXXI + +NOW to return to Tom and Becky's share in the picnic. They tripped +along the murky aisles with the rest of the company, visiting the +familiar wonders of the cave--wonders dubbed with rather +over-descriptive names, such as "The Drawing-Room," "The Cathedral," +"Aladdin's Palace," and so on. Presently the hide-and-seek frolicking +began, and Tom and Becky engaged in it with zeal until the exertion +began to grow a trifle wearisome; then they wandered down a sinuous +avenue holding their candles aloft and reading the tangled web-work of +names, dates, post-office addresses, and mottoes with which the rocky +walls had been frescoed (in candle-smoke). Still drifting along and +talking, they scarcely noticed that they were now in a part of the cave +whose walls were not frescoed. They smoked their own names under an +overhanging shelf and moved on. Presently they came to a place where a +little stream of water, trickling over a ledge and carrying a limestone +sediment with it, had, in the slow-dragging ages, formed a laced and +ruffled Niagara in gleaming and imperishable stone. Tom squeezed his +small body behind it in order to illuminate it for Becky's +gratification. He found that it curtained a sort of steep natural +stairway which was enclosed between narrow walls, and at once the +ambition to be a discoverer seized him. Becky responded to his call, +and they made a smoke-mark for future guidance, and started upon their +quest. They wound this way and that, far down into the secret depths of +the cave, made another mark, and branched off in search of novelties to +tell the upper world about. In one place they found a spacious cavern, +from whose ceiling depended a multitude of shining stalactites of the +length and circumference of a man's leg; they walked all about it, +wondering and admiring, and presently left it by one of the numerous +passages that opened into it. This shortly brought them to a bewitching +spring, whose basin was incrusted with a frostwork of glittering +crystals; it was in the midst of a cavern whose walls were supported by +many fantastic pillars which had been formed by the joining of great +stalactites and stalagmites together, the result of the ceaseless +water-drip of centuries. Under the roof vast knots of bats had packed +themselves together, thousands in a bunch; the lights disturbed the +creatures and they came flocking down by hundreds, squeaking and +darting furiously at the candles. Tom knew their ways and the danger of +this sort of conduct. He seized Becky's hand and hurried her into the +first corridor that offered; and none too soon, for a bat struck +Becky's light out with its wing while she was passing out of the +cavern. The bats chased the children a good distance; but the fugitives +plunged into every new passage that offered, and at last got rid of the +perilous things. Tom found a subterranean lake, shortly, which +stretched its dim length away until its shape was lost in the shadows. +He wanted to explore its borders, but concluded that it would be best +to sit down and rest awhile, first. Now, for the first time, the deep +stillness of the place laid a clammy hand upon the spirits of the +children. Becky said: + +"Why, I didn't notice, but it seems ever so long since I heard any of +the others." 
+ +"Come to think, Becky, we are away down below them--and I don't know +how far away north, or south, or east, or whichever it is. We couldn't +hear them here." + +Becky grew apprehensive. + +"I wonder how long we've been down here, Tom? We better start back." + +"Yes, I reckon we better. P'raps we better." + +"Can you find the way, Tom? It's all a mixed-up crookedness to me." + +"I reckon I could find it--but then the bats. If they put our candles +out it will be an awful fix. Let's try some other way, so as not to go +through there." + +"Well. But I hope we won't get lost. It would be so awful!" and the +girl shuddered at the thought of the dreadful possibilities. + +They started through a corridor, and traversed it in silence a long +way, glancing at each new opening, to see if there was anything +familiar about the look of it; but they were all strange. Every time +Tom made an examination, Becky would watch his face for an encouraging +sign, and he would say cheerily: + +"Oh, it's all right. This ain't the one, but we'll come to it right +away!" + +But he felt less and less hopeful with each failure, and presently +began to turn off into diverging avenues at sheer random, in desperate +hope of finding the one that was wanted. He still said it was "all +right," but there was such a leaden dread at his heart that the words +had lost their ring and sounded just as if he had said, "All is lost!" +Becky clung to his side in an anguish of fear, and tried hard to keep +back the tears, but they would come. At last she said: + +"Oh, Tom, never mind the bats, let's go back that way! We seem to get +worse and worse off all the time." + +"Listen!" said he. + +Profound silence; silence so deep that even their breathings were +conspicuous in the hush. Tom shouted. The call went echoing down the +empty aisles and died out in the distance in a faint sound that +resembled a ripple of mocking laughter. + +"Oh, don't do it again, Tom, it is too horrid," said Becky. + +"It is horrid, but I better, Becky; they might hear us, you know," and +he shouted again. + +The "might" was even a chillier horror than the ghostly laughter, it +so confessed a perishing hope. The children stood still and listened; +but there was no result. Tom turned upon the back track at once, and +hurried his steps. It was but a little while before a certain +indecision in his manner revealed another fearful fact to Becky--he +could not find his way back! + +"Oh, Tom, you didn't make any marks!" + +"Becky, I was such a fool! Such a fool! I never thought we might want +to come back! No--I can't find the way. It's all mixed up." + +"Tom, Tom, we're lost! we're lost! We never can get out of this awful +place! Oh, why DID we ever leave the others!" + +She sank to the ground and burst into such a frenzy of crying that Tom +was appalled with the idea that she might die, or lose her reason. He +sat down by her and put his arms around her; she buried her face in his +bosom, she clung to him, she poured out her terrors, her unavailing +regrets, and the far echoes turned them all to jeering laughter. Tom +begged her to pluck up hope again, and she said she could not. He fell +to blaming and abusing himself for getting her into this miserable +situation; this had a better effect. She said she would try to hope +again, she would get up and follow wherever he might lead if only he +would not talk like that any more. For he was no more to blame than +she, she said. 
+ +So they moved on again--aimlessly--simply at random--all they could do +was to move, keep moving. For a little while, hope made a show of +reviving--not with any reason to back it, but only because it is its +nature to revive when the spring has not been taken out of it by age +and familiarity with failure. + +By-and-by Tom took Becky's candle and blew it out. This economy meant +so much! Words were not needed. Becky understood, and her hope died +again. She knew that Tom had a whole candle and three or four pieces in +his pockets--yet he must economize. + +By-and-by, fatigue began to assert its claims; the children tried to +pay attention, for it was dreadful to think of sitting down when time +was grown to be so precious, moving, in some direction, in any +direction, was at least progress and might bear fruit; but to sit down +was to invite death and shorten its pursuit. + +At last Becky's frail limbs refused to carry her farther. She sat +down. Tom rested with her, and they talked of home, and the friends +there, and the comfortable beds and, above all, the light! Becky cried, +and Tom tried to think of some way of comforting her, but all his +encouragements were grown threadbare with use, and sounded like +sarcasms. Fatigue bore so heavily upon Becky that she drowsed off to +sleep. Tom was grateful. He sat looking into her drawn face and saw it +grow smooth and natural under the influence of pleasant dreams; and +by-and-by a smile dawned and rested there. The peaceful face reflected +somewhat of peace and healing into his own spirit, and his thoughts +wandered away to bygone times and dreamy memories. While he was deep in +his musings, Becky woke up with a breezy little laugh--but it was +stricken dead upon her lips, and a groan followed it. + +"Oh, how COULD I sleep! I wish I never, never had waked! No! No, I +don't, Tom! Don't look so! I won't say it again." + +"I'm glad you've slept, Becky; you'll feel rested, now, and we'll find +the way out." + +"We can try, Tom; but I've seen such a beautiful country in my dream. +I reckon we are going there." + +"Maybe not, maybe not. Cheer up, Becky, and let's go on trying." + +They rose up and wandered along, hand in hand and hopeless. They tried +to estimate how long they had been in the cave, but all they knew was +that it seemed days and weeks, and yet it was plain that this could not +be, for their candles were not gone yet. A long time after this--they +could not tell how long--Tom said they must go softly and listen for +dripping water--they must find a spring. They found one presently, and +Tom said it was time to rest again. Both were cruelly tired, yet Becky +said she thought she could go a little farther. She was surprised to +hear Tom dissent. She could not understand it. They sat down, and Tom +fastened his candle to the wall in front of them with some clay. +Thought was soon busy; nothing was said for some time. Then Becky broke +the silence: + +"Tom, I am so hungry!" + +Tom took something out of his pocket. + +"Do you remember this?" said he. + +Becky almost smiled. + +"It's our wedding-cake, Tom." + +"Yes--I wish it was as big as a barrel, for it's all we've got." + +"I saved it from the picnic for us to dream on, Tom, the way grown-up +people do with wedding-cake--but it'll be our--" + +She dropped the sentence where it was. Tom divided the cake and Becky +ate with good appetite, while Tom nibbled at his moiety. There was +abundance of cold water to finish the feast with. By-and-by Becky +suggested that they move on again. 
Tom was silent a moment. Then he +said: + +"Becky, can you bear it if I tell you something?" + +Becky's face paled, but she thought she could. + +"Well, then, Becky, we must stay here, where there's water to drink. +That little piece is our last candle!" + +Becky gave loose to tears and wailings. Tom did what he could to +comfort her, but with little effect. At length Becky said: + +"Tom!" + +"Well, Becky?" + +"They'll miss us and hunt for us!" + +"Yes, they will! Certainly they will!" + +"Maybe they're hunting for us now, Tom." + +"Why, I reckon maybe they are. I hope they are." + +"When would they miss us, Tom?" + +"When they get back to the boat, I reckon." + +"Tom, it might be dark then--would they notice we hadn't come?" + +"I don't know. But anyway, your mother would miss you as soon as they +got home." + +A frightened look in Becky's face brought Tom to his senses and he saw +that he had made a blunder. Becky was not to have gone home that night! +The children became silent and thoughtful. In a moment a new burst of +grief from Becky showed Tom that the thing in his mind had struck hers +also--that the Sabbath morning might be half spent before Mrs. Thatcher +discovered that Becky was not at Mrs. Harper's. + +The children fastened their eyes upon their bit of candle and watched +it melt slowly and pitilessly away; saw the half inch of wick stand +alone at last; saw the feeble flame rise and fall, climb the thin +column of smoke, linger at its top a moment, and then--the horror of +utter darkness reigned! + +How long afterward it was that Becky came to a slow consciousness that +she was crying in Tom's arms, neither could tell. All that they knew +was, that after what seemed a mighty stretch of time, both awoke out of +a dead stupor of sleep and resumed their miseries once more. Tom said +it might be Sunday, now--maybe Monday. He tried to get Becky to talk, +but her sorrows were too oppressive, all her hopes were gone. Tom said +that they must have been missed long ago, and no doubt the search was +going on. He would shout and maybe some one would come. He tried it; +but in the darkness the distant echoes sounded so hideously that he +tried it no more. + +The hours wasted away, and hunger came to torment the captives again. +A portion of Tom's half of the cake was left; they divided and ate it. +But they seemed hungrier than before. The poor morsel of food only +whetted desire. + +By-and-by Tom said: + +"SH! Did you hear that?" + +Both held their breath and listened. There was a sound like the +faintest, far-off shout. Instantly Tom answered it, and leading Becky +by the hand, started groping down the corridor in its direction. +Presently he listened again; again the sound was heard, and apparently +a little nearer. + +"It's them!" said Tom; "they're coming! Come along, Becky--we're all +right now!" + +The joy of the prisoners was almost overwhelming. Their speed was +slow, however, because pitfalls were somewhat common, and had to be +guarded against. They shortly came to one and had to stop. It might be +three feet deep, it might be a hundred--there was no passing it at any +rate. Tom got down on his breast and reached as far down as he could. +No bottom. They must stay there and wait until the searchers came. They +listened; evidently the distant shoutings were growing more distant! a +moment or two more and they had gone altogether. The heart-sinking +misery of it! Tom whooped until he was hoarse, but it was of no use. 
He +talked hopefully to Becky; but an age of anxious waiting passed and no +sounds came again. + +The children groped their way back to the spring. The weary time +dragged on; they slept again, and awoke famished and woe-stricken. Tom +believed it must be Tuesday by this time. + +Now an idea struck him. There were some side passages near at hand. It +would be better to explore some of these than bear the weight of the +heavy time in idleness. He took a kite-line from his pocket, tied it to +a projection, and he and Becky started, Tom in the lead, unwinding the +line as he groped along. At the end of twenty steps the corridor ended +in a "jumping-off place." Tom got down on his knees and felt below, and +then as far around the corner as he could reach with his hands +conveniently; he made an effort to stretch yet a little farther to the +right, and at that moment, not twenty yards away, a human hand, holding +a candle, appeared from behind a rock! Tom lifted up a glorious shout, +and instantly that hand was followed by the body it belonged to--Injun +Joe's! Tom was paralyzed; he could not move. He was vastly gratified +the next moment, to see the "Spaniard" take to his heels and get +himself out of sight. Tom wondered that Joe had not recognized his +voice and come over and killed him for testifying in court. But the +echoes must have disguised the voice. Without doubt, that was it, he +reasoned. Tom's fright weakened every muscle in his body. He said to +himself that if he had strength enough to get back to the spring he +would stay there, and nothing should tempt him to run the risk of +meeting Injun Joe again. He was careful to keep from Becky what it was +he had seen. He told her he had only shouted "for luck." + +But hunger and wretchedness rise superior to fears in the long run. +Another tedious wait at the spring and another long sleep brought +changes. The children awoke tortured with a raging hunger. Tom believed +that it must be Wednesday or Thursday or even Friday or Saturday, now, +and that the search had been given over. He proposed to explore another +passage. He felt willing to risk Injun Joe and all other terrors. But +Becky was very weak. She had sunk into a dreary apathy and would not be +roused. She said she would wait, now, where she was, and die--it would +not be long. She told Tom to go with the kite-line and explore if he +chose; but she implored him to come back every little while and speak +to her; and she made him promise that when the awful time came, he +would stay by her and hold her hand until all was over. + +Tom kissed her, with a choking sensation in his throat, and made a +show of being confident of finding the searchers or an escape from the +cave; then he took the kite-line in his hand and went groping down one +of the passages on his hands and knees, distressed with hunger and sick +with bodings of coming doom. + + + +CHAPTER XXXII + +TUESDAY afternoon came, and waned to the twilight. The village of St. +Petersburg still mourned. The lost children had not been found. Public +prayers had been offered up for them, and many and many a private +prayer that had the petitioner's whole heart in it; but still no good +news came from the cave. The majority of the searchers had given up the +quest and gone back to their daily avocations, saying that it was plain +the children could never be found. Mrs. Thatcher was very ill, and a +great part of the time delirious. 
People said it was heartbreaking to +hear her call her child, and raise her head and listen a whole minute +at a time, then lay it wearily down again with a moan. Aunt Polly had +drooped into a settled melancholy, and her gray hair had grown almost +white. The village went to its rest on Tuesday night, sad and forlorn. + +Away in the middle of the night a wild peal burst from the village +bells, and in a moment the streets were swarming with frantic half-clad +people, who shouted, "Turn out! turn out! they're found! they're +found!" Tin pans and horns were added to the din, the population massed +itself and moved toward the river, met the children coming in an open +carriage drawn by shouting citizens, thronged around it, joined its +homeward march, and swept magnificently up the main street roaring +huzzah after huzzah! + +The village was illuminated; nobody went to bed again; it was the +greatest night the little town had ever seen. During the first half-hour +a procession of villagers filed through Judge Thatcher's house, seized +the saved ones and kissed them, squeezed Mrs. Thatcher's hand, tried to +speak but couldn't--and drifted out raining tears all over the place. + +Aunt Polly's happiness was complete, and Mrs. Thatcher's nearly so. It +would be complete, however, as soon as the messenger dispatched with +the great news to the cave should get the word to her husband. Tom lay +upon a sofa with an eager auditory about him and told the history of +the wonderful adventure, putting in many striking additions to adorn it +withal; and closed with a description of how he left Becky and went on +an exploring expedition; how he followed two avenues as far as his +kite-line would reach; how he followed a third to the fullest stretch of +the kite-line, and was about to turn back when he glimpsed a far-off +speck that looked like daylight; dropped the line and groped toward it, +pushed his head and shoulders through a small hole, and saw the broad +Mississippi rolling by! And if it had only happened to be night he would +not have seen that speck of daylight and would not have explored that +passage any more! He told how he went back for Becky and broke the good +news and she told him not to fret her with such stuff, for she was +tired, and knew she was going to die, and wanted to. He described how he +labored with her and convinced her; and how she almost died for joy when +she had groped to where she actually saw the blue speck of daylight; how +he pushed his way out at the hole and then helped her out; how they sat +there and cried for gladness; how some men came along in a skiff and Tom +hailed them and told them their situation and their famished condition; +how the men didn't believe the wild tale at first, "because," said they, +"you are five miles down the river below the valley the cave is in" +--then took them aboard, rowed to a house, gave them supper, made them +rest till two or three hours after dark and then brought them home. + +Before day-dawn, Judge Thatcher and the handful of searchers with him +were tracked out, in the cave, by the twine clews they had strung +behind them, and informed of the great news. + +Three days and nights of toil and hunger in the cave were not to be +shaken off at once, as Tom and Becky soon discovered. They were +bedridden all of Wednesday and Thursday, and seemed to grow more and +more tired and worn, all the time. 
Tom got about, a little, on +Thursday, was down-town Friday, and nearly as whole as ever Saturday; +but Becky did not leave her room until Sunday, and then she looked as +if she had passed through a wasting illness. + +Tom learned of Huck's sickness and went to see him on Friday, but +could not be admitted to the bedroom; neither could he on Saturday or +Sunday. He was admitted daily after that, but was warned to keep still +about his adventure and introduce no exciting topic. The Widow Douglas +stayed by to see that he obeyed. At home Tom learned of the Cardiff +Hill event; also that the "ragged man's" body had eventually been found +in the river near the ferry-landing; he had been drowned while trying +to escape, perhaps. + +About a fortnight after Tom's rescue from the cave, he started off to +visit Huck, who had grown plenty strong enough, now, to hear exciting +talk, and Tom had some that would interest him, he thought. Judge +Thatcher's house was on Tom's way, and he stopped to see Becky. The +Judge and some friends set Tom to talking, and some one asked him +ironically if he wouldn't like to go to the cave again. Tom said he +thought he wouldn't mind it. The Judge said: + +"Well, there are others just like you, Tom, I've not the least doubt. +But we have taken care of that. Nobody will get lost in that cave any +more." + +"Why?" + +"Because I had its big door sheathed with boiler iron two weeks ago, +and triple-locked--and I've got the keys." + +Tom turned as white as a sheet. + +"What's the matter, boy! Here, run, somebody! Fetch a glass of water!" + +The water was brought and thrown into Tom's face. + +"Ah, now you're all right. What was the matter with you, Tom?" + +"Oh, Judge, Injun Joe's in the cave!" + + + +CHAPTER XXXIII + +WITHIN a few minutes the news had spread, and a dozen skiff-loads of +men were on their way to McDougal's cave, and the ferryboat, well +filled with passengers, soon followed. Tom Sawyer was in the skiff that +bore Judge Thatcher. + +When the cave door was unlocked, a sorrowful sight presented itself in +the dim twilight of the place. Injun Joe lay stretched upon the ground, +dead, with his face close to the crack of the door, as if his longing +eyes had been fixed, to the latest moment, upon the light and the cheer +of the free world outside. Tom was touched, for he knew by his own +experience how this wretch had suffered. His pity was moved, but +nevertheless he felt an abounding sense of relief and security, now, +which revealed to him in a degree which he had not fully appreciated +before how vast a weight of dread had been lying upon him since the day +he lifted his voice against this bloody-minded outcast. + +Injun Joe's bowie-knife lay close by, its blade broken in two. The +great foundation-beam of the door had been chipped and hacked through, +with tedious labor; useless labor, too, it was, for the native rock +formed a sill outside it, and upon that stubborn material the knife had +wrought no effect; the only damage done was to the knife itself. But if +there had been no stony obstruction there the labor would have been +useless still, for if the beam had been wholly cut away Injun Joe could +not have squeezed his body under the door, and he knew it. So he had +only hacked that place in order to be doing something--in order to pass +the weary time--in order to employ his tortured faculties. Ordinarily +one could find half a dozen bits of candle stuck around in the crevices +of this vestibule, left there by tourists; but there were none now. 
The +prisoner had searched them out and eaten them. He had also contrived to +catch a few bats, and these, also, he had eaten, leaving only their +claws. The poor unfortunate had starved to death. In one place, near at +hand, a stalagmite had been slowly growing up from the ground for ages, +builded by the water-drip from a stalactite overhead. The captive had +broken off the stalagmite, and upon the stump had placed a stone, +wherein he had scooped a shallow hollow to catch the precious drop +that fell once in every three minutes with the dreary regularity of a +clock-tick--a dessertspoonful once in four and twenty hours. That drop +was falling when the Pyramids were new; when Troy fell; when the +foundations of Rome were laid; when Christ was crucified; when the +Conqueror created the British empire; when Columbus sailed; when the +massacre at Lexington was "news." It is falling now; it will still be +falling when all these things shall have sunk down the afternoon of +history, and the twilight of tradition, and been swallowed up in the +thick night of oblivion. Has everything a purpose and a mission? Did +this drop fall patiently during five thousand years to be ready for +this flitting human insect's need? and has it another important object +to accomplish ten thousand years to come? No matter. It is many and +many a year since the hapless half-breed scooped out the stone to catch +the priceless drops, but to this day the tourist stares longest at that +pathetic stone and that slow-dropping water when he comes to see the +wonders of McDougal's cave. Injun Joe's cup stands first in the list of +the cavern's marvels; even "Aladdin's Palace" cannot rival it. + +Injun Joe was buried near the mouth of the cave; and people flocked +there in boats and wagons from the towns and from all the farms and +hamlets for seven miles around; they brought their children, and all +sorts of provisions, and confessed that they had had almost as +satisfactory a time at the funeral as they could have had at the +hanging. + +This funeral stopped the further growth of one thing--the petition to +the governor for Injun Joe's pardon. The petition had been largely +signed; many tearful and eloquent meetings had been held, and a +committee of sappy women been appointed to go in deep mourning and wail +around the governor, and implore him to be a merciful ass and trample +his duty under foot. Injun Joe was believed to have killed five +citizens of the village, but what of that? If he had been Satan himself +there would have been plenty of weaklings ready to scribble their names +to a pardon-petition, and drip a tear on it from their permanently +impaired and leaky water-works. + +The morning after the funeral Tom took Huck to a private place to have +an important talk. Huck had learned all about Tom's adventure from the +Welshman and the Widow Douglas, by this time, but Tom said he reckoned +there was one thing they had not told him; that thing was what he +wanted to talk about now. Huck's face saddened. He said: + +"I know what it is. You got into No. 2 and never found anything but +whiskey. Nobody told me it was you; but I just knowed it must 'a' ben +you, soon as I heard 'bout that whiskey business; and I knowed you +hadn't got the money becuz you'd 'a' got at me some way or other and +told me even if you was mum to everybody else. Tom, something's always +told me we'd never get holt of that swag." + +"Why, Huck, I never told on that tavern-keeper. 
YOU know his tavern +was all right the Saturday I went to the picnic. Don't you remember you +was to watch there that night?" + +"Oh yes! Why, it seems 'bout a year ago. It was that very night that I +follered Injun Joe to the widder's." + +"YOU followed him?" + +"Yes--but you keep mum. I reckon Injun Joe's left friends behind him, +and I don't want 'em souring on me and doing me mean tricks. If it +hadn't ben for me he'd be down in Texas now, all right." + +Then Huck told his entire adventure in confidence to Tom, who had only +heard of the Welshman's part of it before. + +"Well," said Huck, presently, coming back to the main question, +"whoever nipped the whiskey in No. 2, nipped the money, too, I reckon +--anyways it's a goner for us, Tom." + +"Huck, that money wasn't ever in No. 2!" + +"What!" Huck searched his comrade's face keenly. "Tom, have you got on +the track of that money again?" + +"Huck, it's in the cave!" + +Huck's eyes blazed. + +"Say it again, Tom." + +"The money's in the cave!" + +"Tom--honest injun, now--is it fun, or earnest?" + +"Earnest, Huck--just as earnest as ever I was in my life. Will you go +in there with me and help get it out?" + +"I bet I will! I will if it's where we can blaze our way to it and not +get lost." + +"Huck, we can do that without the least little bit of trouble in the +world." + +"Good as wheat! What makes you think the money's--" + +"Huck, you just wait till we get in there. If we don't find it I'll +agree to give you my drum and every thing I've got in the world. I +will, by jings." + +"All right--it's a whiz. When do you say?" + +"Right now, if you say it. Are you strong enough?" + +"Is it far in the cave? I ben on my pins a little, three or four days, +now, but I can't walk more'n a mile, Tom--least I don't think I could." + +"It's about five mile into there the way anybody but me would go, +Huck, but there's a mighty short cut that they don't anybody but me +know about. Huck, I'll take you right to it in a skiff. I'll float the +skiff down there, and I'll pull it back again all by myself. You +needn't ever turn your hand over." + +"Less start right off, Tom." + +"All right. We want some bread and meat, and our pipes, and a little +bag or two, and two or three kite-strings, and some of these +new-fangled things they call lucifer matches. I tell you, many's +the time I wished I had some when I was in there before." + +A trifle after noon the boys borrowed a small skiff from a citizen who +was absent, and got under way at once. When they were several miles +below "Cave Hollow," Tom said: + +"Now you see this bluff here looks all alike all the way down from the +cave hollow--no houses, no wood-yards, bushes all alike. But do you see +that white place up yonder where there's been a landslide? Well, that's +one of my marks. We'll get ashore, now." + +They landed. + +"Now, Huck, where we're a-standing you could touch that hole I got out +of with a fishing-pole. See if you can find it." + +Huck searched all the place about, and found nothing. Tom proudly +marched into a thick clump of sumach bushes and said: + +"Here you are! Look at it, Huck; it's the snuggest hole in this +country. You just keep mum about it. All along I've been wanting to be +a robber, but I knew I'd got to have a thing like this, and where to +run across it was the bother. We've got it now, and we'll keep it +quiet, only we'll let Joe Harper and Ben Rogers in--because of course +there's got to be a Gang, or else there wouldn't be any style about it. 
+Tom Sawyer's Gang--it sounds splendid, don't it, Huck?" + +"Well, it just does, Tom. And who'll we rob?" + +"Oh, most anybody. Waylay people--that's mostly the way." + +"And kill them?" + +"No, not always. Hive them in the cave till they raise a ransom." + +"What's a ransom?" + +"Money. You make them raise all they can, off'n their friends; and +after you've kept them a year, if it ain't raised then you kill them. +That's the general way. Only you don't kill the women. You shut up the +women, but you don't kill them. They're always beautiful and rich, and +awfully scared. You take their watches and things, but you always take +your hat off and talk polite. They ain't anybody as polite as robbers +--you'll see that in any book. Well, the women get to loving you, and +after they've been in the cave a week or two weeks they stop crying and +after that you couldn't get them to leave. If you drove them out they'd +turn right around and come back. It's so in all the books." + +"Why, it's real bully, Tom. I believe it's better'n to be a pirate." + +"Yes, it's better in some ways, because it's close to home and +circuses and all that." + +By this time everything was ready and the boys entered the hole, Tom +in the lead. They toiled their way to the farther end of the tunnel, +then made their spliced kite-strings fast and moved on. A few steps +brought them to the spring, and Tom felt a shudder quiver all through +him. He showed Huck the fragment of candle-wick perched on a lump of +clay against the wall, and described how he and Becky had watched the +flame struggle and expire. + +The boys began to quiet down to whispers, now, for the stillness and +gloom of the place oppressed their spirits. They went on, and presently +entered and followed Tom's other corridor until they reached the +"jumping-off place." The candles revealed the fact that it was not +really a precipice, but only a steep clay hill twenty or thirty feet +high. Tom whispered: + +"Now I'll show you something, Huck." + +He held his candle aloft and said: + +"Look as far around the corner as you can. Do you see that? There--on +the big rock over yonder--done with candle-smoke." + +"Tom, it's a CROSS!" + +"NOW where's your Number Two? 'UNDER THE CROSS,' hey? Right yonder's +where I saw Injun Joe poke up his candle, Huck!" + +Huck stared at the mystic sign awhile, and then said with a shaky voice: + +"Tom, less git out of here!" + +"What! and leave the treasure?" + +"Yes--leave it. Injun Joe's ghost is round about there, certain." + +"No it ain't, Huck, no it ain't. It would ha'nt the place where he +died--away out at the mouth of the cave--five mile from here." + +"No, Tom, it wouldn't. It would hang round the money. I know the ways +of ghosts, and so do you." + +Tom began to fear that Huck was right. Misgivings gathered in his +mind. But presently an idea occurred to him-- + +"Lookyhere, Huck, what fools we're making of ourselves! Injun Joe's +ghost ain't a going to come around where there's a cross!" + +The point was well taken. It had its effect. + +"Tom, I didn't think of that. But that's so. It's luck for us, that +cross is. I reckon we'll climb down there and have a hunt for that box." + +Tom went first, cutting rude steps in the clay hill as he descended. +Huck followed. Four avenues opened out of the small cavern which the +great rock stood in. The boys examined three of them with no result. 
+They found a small recess in the one nearest the base of the rock, with +a pallet of blankets spread down in it; also an old suspender, some +bacon rind, and the well-gnawed bones of two or three fowls. But there +was no money-box. The lads searched and researched this place, but in +vain. Tom said: + +"He said UNDER the cross. Well, this comes nearest to being under the +cross. It can't be under the rock itself, because that sets solid on +the ground." + +They searched everywhere once more, and then sat down discouraged. +Huck could suggest nothing. By-and-by Tom said: + +"Lookyhere, Huck, there's footprints and some candle-grease on the +clay about one side of this rock, but not on the other sides. Now, +what's that for? I bet you the money IS under the rock. I'm going to +dig in the clay." + +"That ain't no bad notion, Tom!" said Huck with animation. + +Tom's "real Barlow" was out at once, and he had not dug four inches +before he struck wood. + +"Hey, Huck!--you hear that?" + +Huck began to dig and scratch now. Some boards were soon uncovered and +removed. They had concealed a natural chasm which led under the rock. +Tom got into this and held his candle as far under the rock as he +could, but said he could not see to the end of the rift. He proposed to +explore. He stooped and passed under; the narrow way descended +gradually. He followed its winding course, first to the right, then to +the left, Huck at his heels. Tom turned a short curve, by-and-by, and +exclaimed: + +"My goodness, Huck, lookyhere!" + +It was the treasure-box, sure enough, occupying a snug little cavern, +along with an empty powder-keg, a couple of guns in leather cases, two +or three pairs of old moccasins, a leather belt, and some other rubbish +well soaked with the water-drip. + +"Got it at last!" said Huck, ploughing among the tarnished coins with +his hand. "My, but we're rich, Tom!" + +"Huck, I always reckoned we'd get it. It's just too good to believe, +but we HAVE got it, sure! Say--let's not fool around here. Let's snake +it out. Lemme see if I can lift the box." + +It weighed about fifty pounds. Tom could lift it, after an awkward +fashion, but could not carry it conveniently. + +"I thought so," he said; "THEY carried it like it was heavy, that day +at the ha'nted house. I noticed that. I reckon I was right to think of +fetching the little bags along." + +The money was soon in the bags and the boys took it up to the cross +rock. + +"Now less fetch the guns and things," said Huck. + +"No, Huck--leave them there. They're just the tricks to have when we +go to robbing. We'll keep them there all the time, and we'll hold our +orgies there, too. It's an awful snug place for orgies." + +"What orgies?" + +"I dono. But robbers always have orgies, and of course we've got to +have them, too. Come along, Huck, we've been in here a long time. It's +getting late, I reckon. I'm hungry, too. We'll eat and smoke when we +get to the skiff." + +They presently emerged into the clump of sumach bushes, looked warily +out, found the coast clear, and were soon lunching and smoking in the +skiff. As the sun dipped toward the horizon they pushed out and got +under way. Tom skimmed up the shore through the long twilight, chatting +cheerily with Huck, and landed shortly after dark. + +"Now, Huck," said Tom, "we'll hide the money in the loft of the +widow's woodshed, and I'll come up in the morning and we'll count it +and divide, and then we'll hunt up a place out in the woods for it +where it will be safe. 
Just you lay quiet here and watch the stuff till +I run and hook Benny Taylor's little wagon; I won't be gone a minute." + +He disappeared, and presently returned with the wagon, put the two +small sacks into it, threw some old rags on top of them, and started +off, dragging his cargo behind him. When the boys reached the +Welshman's house, they stopped to rest. Just as they were about to move +on, the Welshman stepped out and said: + +"Hallo, who's that?" + +"Huck and Tom Sawyer." + +"Good! Come along with me, boys, you are keeping everybody waiting. +Here--hurry up, trot ahead--I'll haul the wagon for you. Why, it's not +as light as it might be. Got bricks in it?--or old metal?" + +"Old metal," said Tom. + +"I judged so; the boys in this town will take more trouble and fool +away more time hunting up six bits' worth of old iron to sell to the +foundry than they would to make twice the money at regular work. But +that's human nature--hurry along, hurry along!" + +The boys wanted to know what the hurry was about. + +"Never mind; you'll see, when we get to the Widow Douglas'." + +Huck said with some apprehension--for he was long used to being +falsely accused: + +"Mr. Jones, we haven't been doing nothing." + +The Welshman laughed. + +"Well, I don't know, Huck, my boy. I don't know about that. Ain't you +and the widow good friends?" + +"Yes. Well, she's ben good friends to me, anyway." + +"All right, then. What do you want to be afraid for?" + +This question was not entirely answered in Huck's slow mind before he +found himself pushed, along with Tom, into Mrs. Douglas' drawing-room. +Mr. Jones left the wagon near the door and followed. + +The place was grandly lighted, and everybody that was of any +consequence in the village was there. The Thatchers were there, the +Harpers, the Rogerses, Aunt Polly, Sid, Mary, the minister, the editor, +and a great many more, and all dressed in their best. The widow +received the boys as heartily as any one could well receive two such +looking beings. They were covered with clay and candle-grease. Aunt +Polly blushed crimson with humiliation, and frowned and shook her head +at Tom. Nobody suffered half as much as the two boys did, however. Mr. +Jones said: + +"Tom wasn't at home, yet, so I gave him up; but I stumbled on him and +Huck right at my door, and so I just brought them along in a hurry." + +"And you did just right," said the widow. "Come with me, boys." + +She took them to a bedchamber and said: + +"Now wash and dress yourselves. Here are two new suits of clothes +--shirts, socks, everything complete. They're Huck's--no, no thanks, +Huck--Mr. Jones bought one and I the other. But they'll fit both of you. +Get into them. We'll wait--come down when you are slicked up enough." + +Then she left. + + + +CHAPTER XXXIV + +HUCK said: "Tom, we can slope, if we can find a rope. The window ain't +high from the ground." + +"Shucks! what do you want to slope for?" + +"Well, I ain't used to that kind of a crowd. I can't stand it. I ain't +going down there, Tom." + +"Oh, bother! It ain't anything. I don't mind it a bit. I'll take care +of you." + +Sid appeared. + +"Tom," said he, "auntie has been waiting for you all the afternoon. +Mary got your Sunday clothes ready, and everybody's been fretting about +you. Say--ain't this grease and clay, on your clothes?" + +"Now, Mr. Siddy, you jist 'tend to your own business. What's all this +blow-out about, anyway?" + +"It's one of the widow's parties that she's always having. 
This time +it's for the Welshman and his sons, on account of that scrape they +helped her out of the other night. And say--I can tell you something, +if you want to know." + +"Well, what?" + +"Why, old Mr. Jones is going to try to spring something on the people +here to-night, but I overheard him tell auntie to-day about it, as a +secret, but I reckon it's not much of a secret now. Everybody knows +--the widow, too, for all she tries to let on she don't. Mr. Jones was +bound Huck should be here--couldn't get along with his grand secret +without Huck, you know!" + +"Secret about what, Sid?" + +"About Huck tracking the robbers to the widow's. I reckon Mr. Jones +was going to make a grand time over his surprise, but I bet you it will +drop pretty flat." + +Sid chuckled in a very contented and satisfied way. + +"Sid, was it you that told?" + +"Oh, never mind who it was. SOMEBODY told--that's enough." + +"Sid, there's only one person in this town mean enough to do that, and +that's you. If you had been in Huck's place you'd 'a' sneaked down the +hill and never told anybody on the robbers. You can't do any but mean +things, and you can't bear to see anybody praised for doing good ones. +There--no thanks, as the widow says"--and Tom cuffed Sid's ears and +helped him to the door with several kicks. "Now go and tell auntie if +you dare--and to-morrow you'll catch it!" + +Some minutes later the widow's guests were at the supper-table, and a +dozen children were propped up at little side-tables in the same room, +after the fashion of that country and that day. At the proper time Mr. +Jones made his little speech, in which he thanked the widow for the +honor she was doing himself and his sons, but said that there was +another person whose modesty-- + +And so forth and so on. He sprung his secret about Huck's share in the +adventure in the finest dramatic manner he was master of, but the +surprise it occasioned was largely counterfeit and not as clamorous and +effusive as it might have been under happier circumstances. However, +the widow made a pretty fair show of astonishment, and heaped so many +compliments and so much gratitude upon Huck that he almost forgot the +nearly intolerable discomfort of his new clothes in the entirely +intolerable discomfort of being set up as a target for everybody's gaze +and everybody's laudations. + +The widow said she meant to give Huck a home under her roof and have +him educated; and that when she could spare the money she would start +him in business in a modest way. Tom's chance was come. He said: + +"Huck don't need it. Huck's rich." + +Nothing but a heavy strain upon the good manners of the company kept +back the due and proper complimentary laugh at this pleasant joke. But +the silence was a little awkward. Tom broke it: + +"Huck's got money. Maybe you don't believe it, but he's got lots of +it. Oh, you needn't smile--I reckon I can show you. You just wait a +minute." + +Tom ran out of doors. The company looked at each other with a +perplexed interest--and inquiringly at Huck, who was tongue-tied. + +"Sid, what ails Tom?" said Aunt Polly. "He--well, there ain't ever any +making of that boy out. I never--" + +Tom entered, struggling with the weight of his sacks, and Aunt Polly +did not finish her sentence. Tom poured the mass of yellow coin upon +the table and said: + +"There--what did I tell you? Half of it's Huck's and half of it's mine!" + +The spectacle took the general breath away. All gazed, nobody spoke +for a moment. 
Then there was a unanimous call for an explanation. Tom +said he could furnish it, and he did. The tale was long, but brimful of +interest. There was scarcely an interruption from any one to break the +charm of its flow. When he had finished, Mr. Jones said: + +"I thought I had fixed up a little surprise for this occasion, but it +don't amount to anything now. This one makes it sing mighty small, I'm +willing to allow." + +The money was counted. The sum amounted to a little over twelve +thousand dollars. It was more than any one present had ever seen at one +time before, though several persons were there who were worth +considerably more than that in property. + + + +CHAPTER XXXV + +THE reader may rest satisfied that Tom's and Huck's windfall made a +mighty stir in the poor little village of St. Petersburg. So vast a +sum, all in actual cash, seemed next to incredible. It was talked +about, gloated over, glorified, until the reason of many of the +citizens tottered under the strain of the unhealthy excitement. Every +"haunted" house in St. Petersburg and the neighboring villages was +dissected, plank by plank, and its foundations dug up and ransacked for +hidden treasure--and not by boys, but men--pretty grave, unromantic +men, too, some of them. Wherever Tom and Huck appeared they were +courted, admired, stared at. The boys were not able to remember that +their remarks had possessed weight before; but now their sayings were +treasured and repeated; everything they did seemed somehow to be +regarded as remarkable; they had evidently lost the power of doing and +saying commonplace things; moreover, their past history was raked up +and discovered to bear marks of conspicuous originality. The village +paper published biographical sketches of the boys. + +The Widow Douglas put Huck's money out at six per cent., and Judge +Thatcher did the same with Tom's at Aunt Polly's request. Each lad had +an income, now, that was simply prodigious--a dollar for every week-day +in the year and half of the Sundays. It was just what the minister got +--no, it was what he was promised--he generally couldn't collect it. A +dollar and a quarter a week would board, lodge, and school a boy in +those old simple days--and clothe him and wash him, too, for that +matter. + +Judge Thatcher had conceived a great opinion of Tom. He said that no +commonplace boy would ever have got his daughter out of the cave. When +Becky told her father, in strict confidence, how Tom had taken her +whipping at school, the Judge was visibly moved; and when she pleaded +grace for the mighty lie which Tom had told in order to shift that +whipping from her shoulders to his own, the Judge said with a fine +outburst that it was a noble, a generous, a magnanimous lie--a lie that +was worthy to hold up its head and march down through history breast to +breast with George Washington's lauded Truth about the hatchet! Becky +thought her father had never looked so tall and so superb as when he +walked the floor and stamped his foot and said that. She went straight +off and told Tom about it. + +Judge Thatcher hoped to see Tom a great lawyer or a great soldier some +day. He said he meant to look to it that Tom should be admitted to the +National Military Academy and afterward trained in the best law school +in the country, in order that he might be ready for either career or +both. 
+ +Huck Finn's wealth and the fact that he was now under the Widow +Douglas' protection introduced him into society--no, dragged him into +it, hurled him into it--and his sufferings were almost more than he +could bear. The widow's servants kept him clean and neat, combed and +brushed, and they bedded him nightly in unsympathetic sheets that had +not one little spot or stain which he could press to his heart and know +for a friend. He had to eat with a knife and fork; he had to use +napkin, cup, and plate; he had to learn his book, he had to go to +church; he had to talk so properly that speech was become insipid in +his mouth; whithersoever he turned, the bars and shackles of +civilization shut him in and bound him hand and foot. + +He bravely bore his miseries three weeks, and then one day turned up +missing. For forty-eight hours the widow hunted for him everywhere in +great distress. The public were profoundly concerned; they searched +high and low, they dragged the river for his body. Early the third +morning Tom Sawyer wisely went poking among some old empty hogsheads +down behind the abandoned slaughter-house, and in one of them he found +the refugee. Huck had slept there; he had just breakfasted upon some +stolen odds and ends of food, and was lying off, now, in comfort, with +his pipe. He was unkempt, uncombed, and clad in the same old ruin of +rags that had made him picturesque in the days when he was free and +happy. Tom routed him out, told him the trouble he had been causing, +and urged him to go home. Huck's face lost its tranquil content, and +took a melancholy cast. He said: + +"Don't talk about it, Tom. I've tried it, and it don't work; it don't +work, Tom. It ain't for me; I ain't used to it. The widder's good to +me, and friendly; but I can't stand them ways. She makes me get up just +at the same time every morning; she makes me wash, they comb me all to +thunder; she won't let me sleep in the woodshed; I got to wear them +blamed clothes that just smothers me, Tom; they don't seem to any air +git through 'em, somehow; and they're so rotten nice that I can't set +down, nor lay down, nor roll around anywher's; I hain't slid on a +cellar-door for--well, it 'pears to be years; I got to go to church and +sweat and sweat--I hate them ornery sermons! I can't ketch a fly in +there, I can't chaw. I got to wear shoes all Sunday. The widder eats by +a bell; she goes to bed by a bell; she gits up by a bell--everything's +so awful reg'lar a body can't stand it." + +"Well, everybody does that way, Huck." + +"Tom, it don't make no difference. I ain't everybody, and I can't +STAND it. It's awful to be tied up so. And grub comes too easy--I don't +take no interest in vittles, that way. I got to ask to go a-fishing; I +got to ask to go in a-swimming--dern'd if I hain't got to ask to do +everything. Well, I'd got to talk so nice it wasn't no comfort--I'd got +to go up in the attic and rip out awhile, every day, to git a taste in +my mouth, or I'd a died, Tom. The widder wouldn't let me smoke; she +wouldn't let me yell, she wouldn't let me gape, nor stretch, nor +scratch, before folks--" [Then with a spasm of special irritation and +injury]--"And dad fetch it, she prayed all the time! I never see such a +woman! I HAD to shove, Tom--I just had to. And besides, that school's +going to open, and I'd a had to go to it--well, I wouldn't stand THAT, +Tom. Looky here, Tom, being rich ain't what it's cracked up to be. 
It's +just worry and worry, and sweat and sweat, and a-wishing you was dead +all the time. Now these clothes suits me, and this bar'l suits me, and +I ain't ever going to shake 'em any more. Tom, I wouldn't ever got into +all this trouble if it hadn't 'a' ben for that money; now you just take +my sheer of it along with your'n, and gimme a ten-center sometimes--not +many times, becuz I don't give a dern for a thing 'thout it's tollable +hard to git--and you go and beg off for me with the widder." + +"Oh, Huck, you know I can't do that. 'Tain't fair; and besides if +you'll try this thing just a while longer you'll come to like it." + +"Like it! Yes--the way I'd like a hot stove if I was to set on it long +enough. No, Tom, I won't be rich, and I won't live in them cussed +smothery houses. I like the woods, and the river, and hogsheads, and +I'll stick to 'em, too. Blame it all! just as we'd got guns, and a +cave, and all just fixed to rob, here this dern foolishness has got to +come up and spile it all!" + +Tom saw his opportunity-- + +"Lookyhere, Huck, being rich ain't going to keep me back from turning +robber." + +"No! Oh, good-licks; are you in real dead-wood earnest, Tom?" + +"Just as dead earnest as I'm sitting here. But Huck, we can't let you +into the gang if you ain't respectable, you know." + +Huck's joy was quenched. + +"Can't let me in, Tom? Didn't you let me go for a pirate?" + +"Yes, but that's different. A robber is more high-toned than what a +pirate is--as a general thing. In most countries they're awful high up +in the nobility--dukes and such." + +"Now, Tom, hain't you always ben friendly to me? You wouldn't shet me +out, would you, Tom? You wouldn't do that, now, WOULD you, Tom?" + +"Huck, I wouldn't want to, and I DON'T want to--but what would people +say? Why, they'd say, 'Mph! Tom Sawyer's Gang! pretty low characters in +it!' They'd mean you, Huck. You wouldn't like that, and I wouldn't." + +Huck was silent for some time, engaged in a mental struggle. Finally +he said: + +"Well, I'll go back to the widder for a month and tackle it and see if +I can come to stand it, if you'll let me b'long to the gang, Tom." + +"All right, Huck, it's a whiz! Come along, old chap, and I'll ask the +widow to let up on you a little, Huck." + +"Will you, Tom--now will you? That's good. If she'll let up on some of +the roughest things, I'll smoke private and cuss private, and crowd +through or bust. When you going to start the gang and turn robbers?" + +"Oh, right off. We'll get the boys together and have the initiation +to-night, maybe." + +"Have the which?" + +"Have the initiation." + +"What's that?" + +"It's to swear to stand by one another, and never tell the gang's +secrets, even if you're chopped all to flinders, and kill anybody and +all his family that hurts one of the gang." + +"That's gay--that's mighty gay, Tom, I tell you." + +"Well, I bet it is. And all that swearing's got to be done at +midnight, in the lonesomest, awfulest place you can find--a ha'nted +house is the best, but they're all ripped up now." + +"Well, midnight's good, anyway, Tom." + +"Yes, so it is. And you've got to swear on a coffin, and sign it with +blood." + +"Now, that's something LIKE! Why, it's a million times bullier than +pirating. I'll stick to the widder till I rot, Tom; and if I git to be +a reg'lar ripper of a robber, and everybody talking 'bout it, I reckon +she'll be proud she snaked me in out of the wet." + + + +CONCLUSION + +SO endeth this chronicle. 
It being strictly a history of a BOY, it +must stop here; the story could not go much further without becoming +the history of a MAN. When one writes a novel about grown people, he +knows exactly where to stop--that is, with a marriage; but when he +writes of juveniles, he must stop where he best can. + +Most of the characters that perform in this book still live, and are +prosperous and happy. Some day it may seem worth while to take up the +story of the younger ones again and see what sort of men and women they +turned out to be; therefore it will be wisest not to reveal any of that +part of their lives at present. + + + + + +End of the Project Gutenberg EBook of The Adventures of Tom Sawyer, Complete +by Mark Twain (Samuel Clemens) diff --git a/vendor/github.com/klauspost/compress/testdata/case1.bin b/vendor/github.com/klauspost/compress/testdata/case1.bin new file mode 100644 index 00000000000..723b4bc222c Binary files /dev/null and b/vendor/github.com/klauspost/compress/testdata/case1.bin differ diff --git a/vendor/github.com/klauspost/compress/testdata/case2.bin b/vendor/github.com/klauspost/compress/testdata/case2.bin new file mode 100644 index 00000000000..c34928f3d30 --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/case2.bin @@ -0,0 +1 @@ +5555555555555054072307277407224532200600565 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/case3.bin b/vendor/github.com/klauspost/compress/testdata/case3.bin new file mode 100644 index 00000000000..c44f88d47d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/case3.bin @@ -0,0 +1 @@ +1502302578000010610834044101581512311545562525 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/crash1.bin b/vendor/github.com/klauspost/compress/testdata/crash1.bin new file mode 100644 index 00000000000..d483846c4b8 Binary files /dev/null and b/vendor/github.com/klauspost/compress/testdata/crash1.bin differ diff --git a/vendor/github.com/klauspost/compress/testdata/crash2.bin b/vendor/github.com/klauspost/compress/testdata/crash2.bin new file mode 100644 index 00000000000..721a65318ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/crash2.bin @@ -0,0 +1 @@ +313254615470505 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/crash3.bin b/vendor/github.com/klauspost/compress/testdata/crash3.bin new file mode 100644 index 00000000000..3e3d493a444 --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/crash3.bin @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/crash4.bin b/vendor/github.com/klauspost/compress/testdata/crash4.bin new file mode 100644 index 00000000000..725b093f0bf --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/crash4.bin @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/crash5.bin b/vendor/github.com/klauspost/compress/testdata/crash5.bin new file mode 100644 index 00000000000..a521217b0ce --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/crash5.bin @@ -0,0 +1 @@ +55511151231257827021181583404541015625 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/dec-hang1.bin b/vendor/github.com/klauspost/compress/testdata/dec-hang1.bin new file mode 100644 index 00000000000..e07926fba0d --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/dec-hang1.bin @@ -0,0 +1 @@ +$ \ No newline at end of file diff 
--git a/vendor/github.com/klauspost/compress/testdata/dec-hang2.bin b/vendor/github.com/klauspost/compress/testdata/dec-hang2.bin new file mode 100644 index 00000000000..c1484ef81d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/dec-hang2.bin @@ -0,0 +1 @@ +|CO \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/dec-hang3.bin b/vendor/github.com/klauspost/compress/testdata/dec-hang3.bin new file mode 100644 index 00000000000..3bd97e9c168 --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/dec-hang3.bin @@ -0,0 +1 @@ +VV \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/testdata/dec-symlen1.bin b/vendor/github.com/klauspost/compress/testdata/dec-symlen1.bin new file mode 100644 index 00000000000..c5589c9936a Binary files /dev/null and b/vendor/github.com/klauspost/compress/testdata/dec-symlen1.bin differ diff --git a/vendor/github.com/klauspost/compress/testdata/e.txt b/vendor/github.com/klauspost/compress/testdata/e.txt new file mode 100644 index 00000000000..5ca186f14c1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/e.txt @@ -0,0 +1 @@ +2.71828182845904523536028747135266249775724709369995957496696762772407663035354759457138217852516642742746639193200305992181741359662904357290033429526059563073813232862794349076323382988075319525101901157383418793070215408914993488416750924476146066808226480016847741185374234544243710753907774499206955170276183860626133138458300075204493382656029760673711320070932870912744374704723069697720931014169283681902551510865746377211125238978442505695369677078544996996794686445490598793163688923009879312773617821542499922957635148220826989519366803318252886939849646510582093923982948879332036250944311730123819706841614039701983767932068328237646480429531180232878250981945581530175671736133206981125099618188159304169035159888851934580727386673858942287922849989208680582574927961048419844436346324496848756023362482704197862320900216099023530436994184914631409343173814364054625315209618369088870701676839642437814059271456354906130310720851038375051011574770417189861068739696552126715468895703503540212340784981933432106817012100562788023519303322474501585390473041995777709350366041699732972508868769664035557071622684471625607988265178713419512466520103059212366771943252786753985589448969709640975459185695638023637016211204774272283648961342251644507818244235294863637214174023889344124796357437026375529444833799801612549227850925778256209262264832627793338656648162772516401910590049164499828931505660472580277863186415519565324425869829469593080191529872117255634754639644791014590409058629849679128740687050489585867174798546677575732056812884592054133405392200011378630094556068816674001698420558040336379537645203040243225661352783695117788386387443966253224985065499588623428189970773327617178392803494650143455889707194258639877275471096295374152111513683506275260232648472870392076431005958411661205452970302364725492966693811513732275364509888903136020572481765851180630364428123149655070475102544650117272115551948668508003685322818315219600373562527944951582841882947876108526398139559900673764829224437528718462457803619298197139914756448826260390338144182326251509748279877799643730899703888677822713836057729788241256119071766394650706330452795466185509666618566470971134447401607046262156807174818778443714369882185596709591025968620023537185887485696522000503117343920732113908032936344797273559552773490717837934216370120500545132638354400018632399149070547977
805669785335804896690629511943247309958765523681285904138324116072260299833053537087613893963917795745401613722361878936526053815584158718692553860616477983402543512843961294603529133259427949043372990857315802909586313826832914771163963370924003168945863606064584592512699465572483918656420975268508230754425459937691704197778008536273094171016343490769642372229435236612557250881477922315197477806056967253801718077636034624592787784658506560507808442115296975218908740196609066518035165017925046195013665854366327125496399085491442000145747608193022120660243300964127048943903971771951806990869986066365832322787093765022601492910115171776359446020232493002804018677239102880978666056511832600436885088171572386698422422010249505518816948032210025154264946398128736776589276881635983124778865201411741109136011649950766290779436460058519419985601626479076153210387275571269925182756879893027617611461625493564959037980458381823233686120162437365698467037858533052758333379399075216606923805336988795651372855938834998947074161815501253970646481719467083481972144888987906765037959036696724949925452790337296361626589760394985767413973594410237443297093554779826296145914429364514286171585873397467918975712119561873857836447584484235555810500256114923915188930994634284139360803830916628188115037152849670597416256282360921680751501777253874025642534708790891372917228286115159156837252416307722544063378759310598267609442032619242853170187817729602354130606721360460003896610936470951414171857770141806064436368154644400533160877831431744408119494229755993140118886833148328027065538330046932901157441475631399972217038046170928945790962716622607407187499753592127560844147378233032703301682371936480021732857349359475643341299430248502357322145978432826414216848787216733670106150942434569844018733128101079451272237378861260581656680537143961278887325273738903928905068653241380627960259303877276977837928684093253658807339884572187460210053114833513238500478271693762180049047955979592905916554705057775143081751126989851884087185640260353055837378324229241856256442550226721559802740126179719280471396006891638286652770097527670697770364392602243728418408832518487704726384403795301669054659374616193238403638931313643271376888410268112198912752230562567562547017250863497653672886059667527408686274079128565769963137897530346606166698042182677245605306607738996242183408598820718646826232150802882863597468396543588566855037731312965879758105012149162076567699506597153447634703208532156036748286083786568030730626576334697742956346437167093971930608769634953288468336130388294310408002968738691170666661468000151211434422560238744743252507693870777751932999421372772112588436087158348356269616619805725266122067975406210620806498829184543953015299820925030054982570433905535701686531205264956148572492573862069174036952135337325316663454665885972866594511364413703313936721185695539521084584072443238355860631068069649248512326326995146035960372972531983684233639046321367101161928217111502828016044880588023820319814930963695967358327420249882456849412738605664913525267060462344505492275811517093149218795927180019409688669868370373022004753143381810927080300172059355305207007060722339994639905713115870996357773590271962850611465148375262095653467132900259943976631145459026858989791158370934193704411551219201171648805669459381311838437656206278463104903462939500294583411648241149697583260118007316994373935069662957124102732391387417549230718624545432220395527352952402459038057445028922468862853365422138157221311632881120521464898051800
639730183112914511513227578413203762258624582247846966697859479149816105226287869441363736831251083106828987661237826975063430472632784537190244479709750173968312144933572907916487799150891632780188525045584887827223767052638118037924778355400181174529577473397140123520114599019847533584348612970929285294241398655075225078089193521041739634934286048713423704295727578625493659178054016525363304106920337046910930975887829382912964478906132000630965607478820821221409784723016806008358123369570514546501812926943645783578156085033033924660395537976308361372894986788428511398536155933527821037407330768184330408936244605767060961882945291713629409675925076313486366060113461159804341474507055114907166406356887390206902794534382369305311334409013813928491635074844490768283866874766636193034123762483801758404678512106982906051961123571888111507236073031585066225745663667407206689990613206277939941128057597983328787921441887254985430145466629450796707076881350222305805622259429830968877328567889714946238882721846476181530458443909672482323482595879636989084566647957542001959919192407076158230023289774397481126904765462568736843522290632178892276432893605359479030468111141305863482445664891592113822588678809725643516464043643284160762477661143498803197922305378896711480589680615942791896474019549894662329621625672647390158186929567656014442485018217133005279955513125398499199339070831380302140725567530226000335657159342831826509089793508696989505426358430467651456689976279896062959251197636729077625678627694699472806060942903149174935905115232356987153971278667180775786719103803689914453814845626826040034567982486898478111383280549404905197680083202996317570430114850873840485918501572643921874145924646174047352752505067839922731216001171603386047107100152356311597347111531981987106161098503757589655767289040603871681143130841728937108174127645812061190541459553788532003666152649236100301570446272317777886498067007235988895287474813721901750747000055711081789303548950179245520673290038188140686862479592722055916279022926005921077105104481033928789912868207054489799773196955743745297081954639424316690500839843989930367906555415960993248678224754243617589443717914037871681661890939002438620386100013621936672808724142911080802918960931275262026678819020855957081118538361661288487295278751432029563932959105083496870290606928384415225794197648249963184794148146608982817256904841843260619462542766936889535407323634283021896949477661260783463284903151280615010095391645306145542349233938062140077792563376193730520256993190997894043908474435969720520659990178285376762656835586254526974552609910245766196140375378595945063632270951224892419318137281416684270130960507345786590479042438520865081544913501364916986390481256666108437022947302667214991648496107468032615833525803528582757990385840916676188771995398886804319916508668877817014396631768155922620169913966131537380212941600069069475334316778026322072262658818427572160554614396773362584629973850773077514738333151014683952964113973296724579335403901361073952456862430080967204609955457089748930487538979555444437913037904223460377687292360013865695939523007680913777688477897462996994899490161418661315522008566736957708227203389366595906663505943300403637625911891956915616261227047886965103560627484231006054720914370694716610802773798485765434812498224442358283298135436451240922208966439872019979456190303973272546178231363633759276226563015658135455783197304193392690082829527182521388551265830376304774906259955149259431053074789010430098765808165081448626079751296
333266752592723516117918367771289310531444716688351829205143436092924931911802493660517914853304210438997730192676860853477681495022992809380658400073117678954912860981123113070025356003478986006538050845325724315536544220676613523374082113078343603269400159269584595882978456494622713008555942933445207270077182063988874047421866977093496477581736835801931683221113655473922881842713738436905266386076624512842993684350826128813673585362938737923699288370479004847222403709198859125563411308494570675990320027516325139266942494856923209045968977756767626842247681200332795770593946131852523564562918059052959747912661628823814298246226541410672464872161743513173976971222280101006681787867761198259615376436418285734810880899885715702797222747347502484390226078804480757248077016210646701669651002026543712600466419355461658389459501435021608901857035581736618234374916226690773118001211882997373198910060609668411932660751654527418294595411892772641925461082463519316477838370782952183896453762363048580427744179071691463565462012151254186648853961615420551523750004267942534177645908215136752584797744651147504384605963258204688096677957090446458846738474816380456351881832103865947982043763347383890177597142362230577763955410112945234880983414766455593422094020597334523379563094414466982224570263671194932866539894913442255177464027325967229935813331108317118072340443268137372312096690524118567348973922341527507079541374534603865067866933962365355564791025085292842942277105930566606251522909241480570809711597834583511731682041296459670706333035692718214962922720732501269552161726498218957909088650853824908489044217555309468320556363164318939176262699310342894851843925396709224125659330791023654852941621322002511937952724803401331352470141821956184190557610301901995216474597344012116012392356793078231907702884158146056472914817451053880601097875059255371523561122901812847101379172151246674285000618182712761250252418761774859940845214927279025670059258544310277046369110988005543124572296838369804708640417060109669622318770653952757838744542291299666230164080547697058214171286363296501304165012781563977996319574126276340111301350827217722871291640022372302348090314853436770165449593807506342852930531311279659452666519604263504064548625433837722094284825435368231861829827131824898844982602857056906990457909981446491936545632594965700446890110499239392180881556261918344043622649655064498485216124984423759284436426120042566286021578011404678796623392281908045776241090764870874061570704866583981448458558032779973279291431957891103735300198731104868956562819173620367030391797106463099062854837028361184866722194576217750345117701104580012912559254626805374277273788637267830165683510923322806499084591796203056915668061808265869239205618954216319860047939611339532263959997495267988010745764665383774004374636951336856713625531840546384751916467379487432709166200980577171034755753331027027063173956124484137457827343763301018534384974502362657331917424465677874996650009387064418867334910998779260053408624428334504869073382793484253056987374694973333642671919689928495345610457193386652224715366811456665969597350759721884166987673216493318989671829786579746122165739224048569002253241603678053299909254389601699016641890388435483756480560126288304094213213002061645408219861380994627212143272344578068199258232028513982371189265412344607235971747779071720415231815751947935274564429846308888463853810686217152745316123031657058489743162098314013263066998966328885326821452040831107380320527846692799840031378789965256351268853684
355596205980572789517544986942193269721332052863745779834873193888995746342520482133375525845710566195869320315632994515025191945596912314375799911383016561171855088166587567511843381457610603651428584278721902325981078345939707382251471118783115408757775600206641245622932391166067333864803670869537492448980680002176666748274269259686864337319165487177501063436083073762816139841073924100371967548338380543698803109839221402605142975912211591485059387706790687013510298622075022877211233456244210247151639412512589543377884928342363611244738228145045968214522535500359683253374891862786783594439790415980439921248898486607950450117011690925193831556094417053979006002913150242538482827828262233041513709295021921965083747146978458055506159145395064373164011733178077414975571167330346320084089540665416946657467357854831337701336289489043976700258630025406352640066016317128839203055763589894924128270224893738489067643853399318786080192231083288474598164177012640890785517778301316161620497927796705218472127303279707382238605819867446686109943830499604374073231957844732548574162397388520162023847842561635125971617831068501562991355598747588481510148154909373809333940744557008420901559038534449621283683136873751667805130825945997712574679397814919536428743211224215798515844916693625515693709168552526447207865279714664767603284713329855019456897727589834505860043168226586311766062372017210079222164101882993308084093840142137596971859768970427590415009465952527634876281358671173523649641210588549344966458986518265456343828511591376315695198952302628817949599715452212506674611743948844333126594322867109652811095016930283514965240828501201908310786780670618511457409707875631176107464288355939159854216731151530969487583789559795861326495698172052842910381727212131386815655244281098711688627439680218855815153675312183741199729194713254651991441885006720364819759441679508874879344167595983619600109948387447090791040997859746561124598519721575581346285461897286150207743745295395369296554490129530972889637677133538424297153941795471790955801201342101751509314916646990523663502330240872186547276296390657233414550059039138902536993171559171798230651626797447118579515065738685040882299348044455498505978232978986170294984183762552587574553031129919143411094130882381144430688430626553056016588014085610233242103002184605885869544185029774630858584961300372381903251622255707299757107273060660729169229780336470488409587112280451885119087185882995143315341285492971738497685231362760768684947803649482999044757157711410809580581412089560594716686262900361456026253348632849868160394633724366671129644602929157461811177891696958399470809547888635032811296268992311100998893178153139466818820283683633738222814149740069179421928888171391162839102956849182333589308133601314887483664642243817760810077391833937493469336447481505649336493231572353061093857968399021533814491269253507682110987383521975077366534754994317405805630991432182125473362813594883176814891943065304260297738854929745705694487830779458788650629708954998437601816940310569095871413868048463598536840341059483417884389631799564688157919371746567050474415280277125415694013658620977607356328329665641358170280880135463261048927687318299179503799444463281585951813801447168172849967930618141771319120992362829226125432360712262703245726379468635333917587374465520060088199752940175724212997235420696304278579506089111134165348934311491753149535300674197449790172351816715687541634849494912890017393774514319283824311832632650795303711778061858511535088099982004827618083072096
496364769430661725491861437009713875679402186967101485403074715610913589331656001672521265425028986122593064841058988471296492309412151445639478899993271458759695557370908551506480023214764430372324661471115525785830710249368988145625687868347455188933851817916675790542104210363493162578704765431267906612166441422850174462784771327405955796006483432888278648370434560669664568997469103739877128915933132712662475055822586349284277183558316415936677122185376423762221047793389563787229025095430141822571803313001481133777369415084888675018931569948498389360526668180127839120058014315964419105466632368101482077993565230564904207113641922001771891079352432343227617877125682511264813329743549265686827487159866549430416484682205939216733594850578496228079324226498127052713984077209957072362270092450676656800691499665557378664118770797677548670287864318179415217961783106550302871572722822508120170607133803396418412112538562489201300107824621651369895110646111335624438381853662735637834369212793547092301196559149158005617072585185031672893704119363747806258242982507264648018215234302680814869781648243493534568558436963783841538380511844060436968716664165140361297299929126308428121491524698774293323052149999818290461194716767275037422213671866146540425344631416606498714990010006600415448684373522084830594959531828722805208286763003610917345086321330336472895841765887553452279384802977244857118155748935613115249267720063621983699806641595493886838364118914304437677154980265449590617382655911785459993785108614460149676455501036539712511385835050851124425177729238143962330437240360326031814429913657502460127875141179449013058034521999927011480717128477703012549948868418675729751892142956525124869439837290474103631218991242173395506887786431307500248233618327387296973765988200538959029354860549798023204004722368735574118581327343379789315820394128789897289732988125535145076415353605194621122170006763216111958410292525685365618131387840864771470997245530131707617121631866002914645013785878548020962447037713735877200867380541081400423114185258032932673963245969140448346657220428806792806160298840434005365340097065816946360966609111109687897518013252244782469579132518921226530560858665411153735849127902546543690208694198711255884537290632244232222871391220122487699768371476455985267392259049978855142500475852602979293061599134448983419735833160701075164523013107966203825792785331251617607899846301034934969814942610553678363660225612137670814210913735317806824201757374702871893102076069533557217043575351774615735248384321015713998137985966071296644383147912963592754296271294361426859221389930549806453991445886924727675985442715277884438367601499128973582599618697297565889787410821894223373445473752276931992226359735207229983873684843491768411910202466274795795643496150126574338457586388347358322425353281420478269344731299711893463545029946817471281792981674396445249566555323116499206771636645803182058496261322346526061754135324447020076618074189140401581485600010301199941095954923214344060676347697130895133891710505038563365035451664317744896400617388617611936226768905769556939187077039423049400384406226144495725166310170806429233451704224266796070754040285511823983615313837514324930563983818779955949425451967565591819686908852834348860508285296424375787129294393661773628301365958727230809694683989386763662264567911329774698126752265956210093183220817546947788787553561883350838702482953460785970236098656563767227557044952587398718125934419037852755713334098424501272585966924343176890189661454044536790471362942
381561276568242478647361766717706470024311197110900074740659456503153750441779821923063237008720392120854995696810613791890299611789367521460223869056654813828582804495375301609214221959406387870747879911949208983740917885344175230647150302783979798645173366253295117751055590141604598733381868879778588172919766045163533535560476484205208888117228319900445042844868523383345301055339296373080397382306047141045254700948994076012152476028199638463435548529323771614108695919507868732760754000852200650318712392728578358070107625427696553559647894501660138162951779085311398110928315832169315638674597474495843852827016582461920922195291343234967793455856131402077659961425464632886773568917855768351696083928641888300948833247004479583169315338323823778763444263234563016795136710475104696690012177771280655224536893718714515673947334404472804509594330906836671106559533386029380009999490106427698596232604018637335728466795312296831563581454208905406512264191620155045004305621369918509410346096010305438166947959645858044251949051107333876799467344717186156477238117370356549176287075894560355191956039623011578663237502347250544610739794024751844155581780879628222319726929845166833069195050799933572591656755572945859621820526504733537123516236627704793332893221361418587859727716856827253037348368919118471971337530884467779432748571488278216088447657000414034999213767942096275608830815094380307056660227646781175333610281878007102197944287773131463878578172056614090230414999232482689824772221098521897581408797634861467636063686746119666203473046089172772400459530513769383753815434869811019906517069617740522182474226576521381527406126990127068808753864086699014617408905409818776718800761241519670641521176530843255442610175363482811968374933958257425412446342472335863607779809601997451877588454596458959567795588690984047682592534778499304578831285417470790597959094316277223278445789186942149294515401742146232403008419079752967824459691835094742021236179403090486349605340549312999194960879579525869771702366800338625057649380887409940095899481093979832311088387692364902214991111208706392028924906984353331527279913309863354543249714413780591322408149601564856798439664647802804090575808891902542366067745004134157943121125012752322501480672329796522304884937511660849761164127773953113020415668482655314113489932437478902689351739040432948516106597858322531682042028349936415959801973438898830209941521522886111751266861730519562493671800538456378551291718484178415947974355806178566807584910801858056955679901851983976606933582247791365045627057667351709615504933383904526124043955174491368851159874543409320401022189827075392124032410424244515700529683788157494684415080111386125611641024771909030500402406622789456070615121082661460986620404250105839780981920197267590107499248849661394411841597346103824011785567390805664833210390738670832986910780934958288887071106515596512225429291542129231080711597232757975108599113980768447326394264194520631382178622609991600867524462654570289690671922822830451691113636527745179758421471022190999062573733834727264986782444010489985076316306680502671159446362935251202694248108545306028106272642365382507733405754757017043670395964677159592610294383130748972455057290856884960913463231658194686605870921446537167556555319620918659526284482537313536981625173519301153415811713532920358731641688391079940006772660316175275829173983958526064541133189855057478471210535057956490959316721675656248187820027699637341558800008678525674224615114060157601159102564490022649800394984033580913091401978778
436501679601674653702874660625843463297083037259804946535893189121639760131930794769720580347105531111172158592190662310280992120840692830919060173707646546556834132075563153150064534623210071335849076330483281534586984973325998011874796642731402793812899617205245406746952719480799303967301942740364665941544000927999086348066223349066952240446521589928642034350988584226920193405754968409048129555226547546507135328425434966160849547880907276499302527028150678628108252432229799853917598451888683870044771018667721594397085146646128711487495318621809417196768431446664351758376884367860814463196419125665740477186991609155509108789194312536719456512618784869108767299105655951551597396590343836281246291181177609494118801059463366710390497773120042435781157904298230450720383227812464136712979594150829183782132128768905459635863693448797497848411232749213316631628124563882382887156484478831424176501479801878582157687930630011537889980146236901358037533062461485760749325678076826510457380590188312376172718899337904871133955884852342402550023522006135749143182591424798293677754904963993507558396689675783643166183693076256035286029406628032554165354315180137148219417726722440052684019965333341840043455252965929185029401316006511243952978743642228069777204373637178734579484202387451512491579131394111486084164293479587936818686096896846408583341310178581427109554162933759151783923413031105433287035265999939049668221127681583165112468664511673513782143453366505983283474435362903123936720845931643949418811386079746701347096403785349071490898423178917397836506547519828833673957143600000034398633632120917189548990557486933977002456324759545044114225824107838668376554674001373243228091136926706828053975491111661711023974377494793351740361350053975814755208342857728009861894019843754464350814982183601125776324473894520516369385851364842599645183618569890887217897646947212468079003309250834966458416565542612941951088471972091066051055409337319548884064440802805795490080760400341546621376696064442937749858973536255919596185524481879403173745082560728951209454565621595404054258148869298427865823576731957992852931208662759223661151374457679160636216752674404512210510520908347074439861378290823527728958496256568819727927686947958061005737870841214448150347974223121032953592978223771340775495454777918138235426071846171083890978259644061705435469685670307454116342441344863086763279491776829230931832213414554825913672028232843965490018056532039607955170744960390066969903341992782126967677718352090839595453418667779448727403837333819852358842028401509815795946858745379895032573628098375922162292585985991238439935755732850286131559703629342498141780564616158634153386350772232699965088608709999648993730493071709678887401497461475428803874212506892121558766922423874347011209908590821640735763808173869597551760838776002775172530371334456548526356617201975630015800497902234195867380614424015024362889575032065336908257567855070205551055723818785746503710863081581858628158830545646622976948039706182654913851813267374852271882679179190913544078526854762541266833982405340224699899666525731556376458622518628230920854244128059976285054889130983317618849833529751360737720305713427396381265885674050138410747889433939966035918539341984163226176548573766719431328400506262951403578772646806495493557463264081869797186302187600258139957199236013453742297589182851675113581714726258285969407985185718700758231223170681348679308848992751816613996097531052957735846185258652118933393757718599163351121634410379104518450190230668930641789778081
581013604494954096653636603700758810044502657349351277074267425786087848981856288699808516657133208358426133811426238554203157742466131088731063181119898802897228497905510751484037022905804830527318849599941566065373140212967022208219158629059526040406200118152696649100685875926556605675629633614342302328107474883950403809849818600561646460998192576162354787109138329675637615067325508606834337204387481867916689757465634560200025628896011911009804533504238420638240394341635029776888027798350874811782983494172116749194256016086853324353859511520618090312416981820793146150620738260971804582656870436239357574957373327815789043860113780785081102730494466118219574501701060593843365194586283606821085851304998204205784585771759338490155644473058345152914125616799705696574261399016819320562419279772820267142972587001932343378731539394031154111841014142927417035375420036987606087655001093452990070340324013348063885140957695571471903641520277211270701874215481239319532209975065530226468442277000205890459227424239049370515073677646298449716821219941982747940490926017157274393685697218629360073870778107974409755566278073712280303500488298439195464337533557878950640189986850602819024521911770186345051710870239033985505407044541890884720423764997490350385189495058979712866316446994074909594734115819346183366921695736050815850808379520363356199476919379650650168087102507350708252600468212428204343672458244788592565554878616144787175810685723568951507076022174335116273317094727659324132491327024255193915090836013462396123350010866146238506331270729877456189843842887640998361649647757146385732473332266538945235883659729551599051874117792886087602393061600161684340706116634492483951563191528827288228313754586782698306966912201309548159354507549235541677668764552125456812429364274741538156922195033315601516144922475124889575348359262262635454067047670338664100252772768008863832666294885827403696553293622360905724797947344340777042843185079019734690711412303641117292249293077319393097954528774124511839534803822103736446970469674930428109117972324486154132640315784309553966710614680838155489471467336524836791385664310847478486762430120184893291096152811080876174227791316293454944253954227273096450579761228853473931896008109652020901511045793776025295431301889381840102470101349293174435628835786098615456911616698573880249737569405581386305810998233725651649201554432168616905370546301761548096266208006330593207758971755899258621954620964554646243995353917432282254332671743084925083964613289295845679273654091199476162251559647040612970477598185518784414199486140131538593220607451859096088842802189433586919596049364096515703275275706415007762613237836481490052454814131959892963984413717814027641220876449896886297989108701642701690140078257483115989763306129511956804274853178863330411697671750638221352138397791384433256442884908729190670098024962815606262586369423226584906286280350572829831012669191096372583781493637749605945152169326449451882926395257723484200773560216569090770972649856428317786947778049643439917625492165006086262853294710556026704133845005078273906402875298641612874964737082351888921896126412795535364422869554305513087000098785575342231005471534128109570248708126543191232619564621493765275263564021273887651038832550073648999371671832800283988323193733015641232771853956549324229779530165348301284906778450374908917493473890156495885748021949967226211858743610397749463386330578874874055400054404393448881920441021347900345984119270249215570268737009709952053919309793194958832659221715083246219423001859
743967064911495594117337281998690213116298866802674464434892330206070038212628417236796273071914050080840857039781519981488223900599489119464744386825337458899623751333782805329282720168159779700664883944824463322109283205040459830089435659542672568797149187034473382377679148292032831968381059077157271919030423653156509574645496434253280695103965587335498038509951434635061753614800501950452013502001802815069332419182678557377644140970809457456248548677049043683687175909180572697940104650194848531467266429786676876977892914311285050430981929497361659442594717547651352052450725975385779583727977029722314351999584995223440493945021154288672441887174095245547718674849114750318017733046899093179744729570351923876864055442781341698072493822197491242575101621874397729021477046380107314706531542013005838104589050067645573329981499458546551055263749143541958679925959814122187352384079574161233722640638604319889362498676496935925695921284959062544464743317599996851636603052164267704281546817775893392521155385905268233116083027511943848238615528524650103294672971981121053141258981651001207426881435775908252274668632061883768304509217845825262395941896730036408086242336576209791116417663313288523520624879229789594564503337331394223847785827171954123478604343761652415687179435625702156366666800885310067289470330795408045833241921884888707122756703331739392625090735561645136770641995391119488812406598216857871313850568506230941552068779875397406584842501352056151034898218737702450635833142436248074325424641959846474115756254410103896715766772631964425249319418064724237893346685610837898088303135713331577294356649560781253049175940158951469549652231185596690485594676079681901672666346501861829556698939650196145444017681628106044650684481395616672207292612101646923390167933996328330131638508309679427929345512684357603569019705231383646409613117749046007728408622147475476532215055181164898878790877809180090507060400612200100512715759912257252825233780268090305284615817395581981223970100920172022516063529224647816155335322754532645430870933209246318559765805617174468404500482853533965468626788523300449677955807616618018336687923125104608097738955654889628150895196220936750588416097522823282504337129701866081937489686999613014869246944824207236329123670525421454641629689104429816333732668716759467153926119506492247256272545432741934959955695902432790971743922580981036014863644091014917341830796463450648333034047657118270402768682714180845749984933920393174454026166636746466687543850939671299180674719098853127107267244285848706943070997565679491984189964257488847646220303256377511125340600879369045657792720352059213459242729652066833385106736152762610160266477724850833447198919868026561972364208475049626616077970929068447577982517955697582350843717461033103879117892394416301126340775357735205580400669825231912255705191336314072113497232265491510629617390506178571275094036231467009311761331320186311587308867982392980098050894915107883711940997503754736743057451872654140164469245767921857536803632891396641553420667056232729360011777814988861008308778495717098808586670231040432425267859555620773105430722980321259411079573491466846802205018161921507666491068620333787138260589876552104236681986701778616726719723741569178800016906566590469653161549236040618918209824140061037794071663420027358289119941826478127826596662070303847958814427902466692640327994040168001372934773015309418050705874211532846422030065507639667561683188970051520266566499294173828403273059407401471174784648392412256765235934185540664409837060836
diff --git a/vendor/github.com/klauspost/compress/testdata/endnonzero.bin b/vendor/github.com/klauspost/compress/testdata/endnonzero.bin
new file mode 100644
index 00000000000..cf08368af17
Binary files /dev/null and b/vendor/github.com/klauspost/compress/testdata/endnonzero.bin differ
diff --git a/vendor/github.com/klauspost/compress/testdata/endzerobits.bin b/vendor/github.com/klauspost/compress/testdata/endzerobits.bin
new file mode 100644
index 00000000000..d9952f92115
Binary files /dev/null and b/vendor/github.com/klauspost/compress/testdata/endzerobits.bin differ
diff --git a/vendor/github.com/klauspost/compress/testdata/gettysburg.txt b/vendor/github.com/klauspost/compress/testdata/gettysburg.txt
new file mode 100644
index 00000000000..2c9bcde3605
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/testdata/gettysburg.txt
@@ -0,0 +1,29 @@
+ Four score and seven years ago our fathers brought forth on
+this continent, a new nation, conceived in Liberty, and dedicated
+to the proposition that all men are created equal.
+ Now we are engaged in a great Civil War, testing whether that
+nation, or any nation so conceived and so dedicated, can long
+endure.
+ We are met on a great battle-field of that war.
+ We have come to dedicate a portion of that field, as a final
+resting place for those who here gave their lives that that
+nation might live. It is altogether fitting and proper that
+we should do this.
+ But, in a larger sense, we can not dedicate - we can not
+consecrate - we can not hallow - this ground.
+ The brave men, living and dead, who struggled here, have
+consecrated it, far above our poor power to add or detract.
+The world will little note, nor long remember what we say here,
+but it can never forget what they did here.
+ It is for us the living, rather, to be dedicated here to the
+unfinished work which they who fought here have thus far so
+nobly advanced. It is rather for us to be here dedicated to
+the great task remaining before us - that from these honored
+dead we take increased devotion to that cause for which they
+gave the last full measure of devotion -
+ that we here highly resolve that these dead shall not have
+died in vain - that this nation, under God, shall have a new
+birth of freedom - and that government of the people, by the
+people, for the people, shall not perish from this earth.
+
+Abraham Lincoln, November 19, 1863, Gettysburg, Pennsylvania
diff --git a/vendor/github.com/klauspost/compress/testdata/normcount2.bin b/vendor/github.com/klauspost/compress/testdata/normcount2.bin
new file mode 100644
index 00000000000..39050daec3c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/testdata/normcount2.bin
@@ -0,0 +1 @@
+868000113000000fd9F055125272181835410155551151-0_0040Y2BW4K_0x_j4L___e__331ms__QSlz_I__
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/testdata/pi.txt b/vendor/github.com/klauspost/compress/testdata/pi.txt
new file mode 100644
index 00000000000..ca99bbc2a25
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/testdata/pi.txt
@@ -0,0 +1 @@
+3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745028410270193852110555964462294895493038196
971130805123393322190346624991716915094854140187106035460379464337900589095772118080446574396280618671786101715674096766208029576657705129120990794430463289294730615951043090222143937184956063405618934251305726829146578329334052463502892917547087256484260034962961165413823007731332729830500160256724014185152041890701154288579920812198449315699905918201181973350012618772803681248199587707020753240636125931343859554254778196114293516356122349666152261473539967405158499860355295332924575238881013620234762466905581643896786309762736550472434864307121849437348530060638764456627218666170123812771562137974614986132874411771455244470899714452288566294244023018479120547849857452163469644897389206240194351831008828348024924908540307786387516591130287395878709810077271827187452901397283661484214287170553179654307650453432460053636147261818096997693348626407743519992868632383508875668359509726557481543194019557685043724800102041374983187225967738715495839971844490727914196584593008394263702087563539821696205532480321226749891140267852859967340524203109179789990571882194939132075343170798002373659098537552023891164346718558290685371189795262623449248339249634244971465684659124891855662958932990903523923333364743520370770101084388003290759834217018554228386161721041760301164591878053936744747205998502358289183369292233732399948043710841965947316265482574809948250999183300697656936715968936449334886474421350084070066088359723503953234017958255703601693699098867113210979889707051728075585519126993067309925070407024556850778679069476612629808225163313639952117098452809263037592242674257559989289278370474445218936320348941552104459726188380030067761793138139916205806270165102445886924764924689192461212531027573139084047000714356136231699237169484813255420091453041037135453296620639210547982439212517254013231490274058589206321758949434548906846399313757091034633271415316223280552297297953801880162859073572955416278867649827418616421878988574107164906919185116281528548679417363890665388576422915834250067361245384916067413734017357277995634104332688356950781493137800736235418007061918026732855119194267609122103598746924117283749312616339500123959924050845437569850795704622266461900010350049018303415354584283376437811198855631877779253720116671853954183598443830520376281944076159410682071697030228515225057312609304689842343315273213136121658280807521263154773060442377475350595228717440266638914881717308643611138906942027908814311944879941715404210341219084709408025402393294294549387864023051292711909751353600092197110541209668311151632870542302847007312065803262641711616595761327235156666253667271899853419989523688483099930275741991646384142707798870887422927705389122717248632202889842512528721782603050099451082478357290569198855546788607946280537122704246654319214528176074148240382783582971930101788834567416781139895475044833931468963076339665722672704339321674542182455706252479721997866854279897799233957905758189062252547358220523642485078340711014498047872669199018643882293230538231855973286978092225352959101734140733488476100556401824239219269506208318381454698392366461363989101210217709597670490830508185470419466437131229969235889538493013635657618610606222870559942337163102127845744646398973818856674626087948201864748767272722206267646533809980196688368099415907577685263986514625333631245053640261056960551318381317426118442018908885319635698696279503673842431301133175330532980201668881748134298868158557781034323175306478498321062971842518438553442762012823457071698853051832617964117857960888815032960229070
561447622091509473903594664691623539680920139457817589108893199211226007392814916948161527384273626429809823406320024402449589445612916704950823581248739179964864113348032475777521970893277226234948601504665268143987705161531702669692970492831628550421289814670619533197026950721437823047687528028735412616639170824592517001071418085480063692325946201900227808740985977192180515853214739265325155903541020928466592529991435379182531454529059841581763705892790690989691116438118780943537152133226144362531449012745477269573939348154691631162492887357471882407150399500944673195431619385548520766573882513963916357672315100555603726339486720820780865373494244011579966750736071115935133195919712094896471755302453136477094209463569698222667377520994516845064362382421185353488798939567318780660610788544000550827657030558744854180577889171920788142335113866292966717964346876007704799953788338787034871802184243734211227394025571769081960309201824018842705704609262256417837526526335832424066125331152942345796556950250681001831090041124537901533296615697052237921032570693705109083078947999900499939532215362274847660361367769797856738658467093667958858378879562594646489137665219958828693380183601193236857855855819555604215625088365020332202451376215820461810670519533065306060650105488716724537794283133887163139559690583208341689847606560711834713621812324622725884199028614208728495687963932546428534307530110528571382964370999035694888528519040295604734613113826387889755178856042499874831638280404684861893818959054203988987265069762020199554841265000539442820393012748163815853039643992547020167275932857436666164411096256633730540921951967514832873480895747777527834422109107311135182804603634719818565557295714474768255285786334934285842311874944000322969069775831590385803935352135886007960034209754739229673331064939560181223781285458431760556173386112673478074585067606304822940965304111830667108189303110887172816751957967534718853722930961614320400638132246584111115775835858113501856904781536893813771847281475199835050478129771859908470762197460588742325699582889253504193795826061621184236876851141831606831586799460165205774052942305360178031335726326705479033840125730591233960188013782542192709476733719198728738524805742124892118347087662966720727232565056512933312605950577772754247124164831283298207236175057467387012820957554430596839555568686118839713552208445285264008125202766555767749596962661260456524568408613923826576858338469849977872670655519185446869846947849573462260629421962455708537127277652309895545019303773216664918257815467729200521266714346320963789185232321501897612603437368406719419303774688099929687758244104787812326625318184596045385354383911449677531286426092521153767325886672260404252349108702695809964759580579466397341906401003636190404203311357933654242630356145700901124480089002080147805660371015412232889146572239314507607167064355682743774396578906797268743847307634645167756210309860409271709095128086309029738504452718289274968921210667008164858339553773591913695015316201890888748421079870689911480466927065094076204650277252865072890532854856143316081269300569378541786109696920253886503457718317668688592368148847527649846882194973972970773718718840041432312763650481453112285099002074240925585925292610302106736815434701525234878635164397623586041919412969769040526483234700991115424260127343802208933109668636789869497799400126016422760926082349304118064382913834735467972539926233879158299848645927173405922562074910530853153718291168163721939518870095778818158685046450769934394098743351
443162633031724774748689791820923948083314397084067308407958935810896656477585990556376952523265361442478023082681183103773588708924061303133647737101162821461466167940409051861526036009252194721889091810733587196414214447865489952858234394705007983038853886083103571930600277119455802191194289992272235345870756624692617766317885514435021828702668561066500353105021631820601760921798468493686316129372795187307897263735371715025637873357977180818487845886650433582437700414771041493492743845758710715973155943942641257027096512510811554824793940359768118811728247215825010949609662539339538092219559191818855267806214992317276316321833989693807561685591175299845013206712939240414459386239880938124045219148483164621014738918251010909677386906640415897361047643650006807710565671848628149637111883219244566394581449148616550049567698269030891118568798692947051352481609174324301538368470729289898284602223730145265567989862776796809146979837826876431159883210904371561129976652153963546442086919756737000573876497843768628768179249746943842746525631632300555130417422734164645512781278457777245752038654375428282567141288583454443513256205446424101103795546419058116862305964476958705407214198521210673433241075676757581845699069304604752277016700568454396923404171108988899341635058515788735343081552081177207188037910404698306957868547393765643363197978680367187307969392423632144845035477631567025539006542311792015346497792906624150832885839529054263768766896880503331722780018588506973623240389470047189761934734430843744375992503417880797223585913424581314404984770173236169471976571535319775499716278566311904691260918259124989036765417697990362375528652637573376352696934435440047306719886890196814742876779086697968852250163694985673021752313252926537589641517147955953878427849986645630287883196209983049451987439636907068276265748581043911223261879405994155406327013198989570376110532360629867480377915376751158304320849872092028092975264981256916342500052290887264692528466610466539217148208013050229805263783642695973370705392278915351056888393811324975707133102950443034671598944878684711643832805069250776627450012200352620370946602341464899839025258883014867816219677519458316771876275720050543979441245990077115205154619930509838698254284640725554092740313257163264079293418334214709041254253352324802193227707535554679587163835875018159338717423606155117101312352563348582036514614187004920570437201826173319471570086757853933607862273955818579758725874410254207710547536129404746010009409544495966288148691590389907186598056361713769222729076419775517772010427649694961105622059250242021770426962215495872645398922769766031052498085575947163107587013320886146326641259114863388122028444069416948826152957762532501987035987067438046982194205638125583343642194923227593722128905642094308235254408411086454536940496927149400331978286131818618881111840825786592875742638445005994422956858646048103301538891149948693543603022181094346676400002236255057363129462629609619876056425996394613869233083719626595473923462413459779574852464783798079569319865081597767535055391899115133525229873611277918274854200868953965835942196333150286956119201229888988700607999279541118826902307891310760361763477948943203210277335941690865007193280401716384064498787175375678118532132840821657110754952829497493621460821558320568723218557406516109627487437509809223021160998263303391546949464449100451528092508974507489676032409076898365294065792019831526541065813682379198409064571246894847020935776119313998024681340520039478194986620262400890215016616381
353838151503773502296607462795291038406868556907015751662419298724448271942933100485482445458071889763300323252582158128032746796200281476243182862217105435289834820827345168018613171959332471107466222850871066611770346535283957762599774467218571581612641114327179434788599089280848669491413909771673690027775850268664654056595039486784111079011610400857274456293842549416759460548711723594642910585090995021495879311219613590831588262068233215615308683373083817327932819698387508708348388046388478441884003184712697454370937329836240287519792080232187874488287284372737801782700805878241074935751488997891173974612932035108143270325140903048746226294234432757126008664250833318768865075642927160552528954492153765175149219636718104943531785838345386525565664065725136357506435323650893679043170259787817719031486796384082881020946149007971513771709906195496964007086766710233004867263147551053723175711432231741141168062286420638890621019235522354671166213749969326932173704310598722503945657492461697826097025335947502091383667377289443869640002811034402608471289900074680776484408871134135250336787731679770937277868216611786534423173226463784769787514433209534000165069213054647689098505020301504488083426184520873053097318949291642532293361243151430657826407028389840984160295030924189712097160164926561341343342229882790992178604267981245728534580133826099587717811310216734025656274400729683406619848067661580502169183372368039902793160642043681207990031626444914619021945822969099212278855394878353830564686488165556229431567312827439082645061162894280350166133669782405177015521962652272545585073864058529983037918035043287670380925216790757120406123759632768567484507915114731344000183257034492090971243580944790046249431345502890068064870429353403743603262582053579011839564908935434510134296961754524957396062149028872893279252069653538639644322538832752249960598697475988232991626354597332444516375533437749292899058117578635555562693742691094711700216541171821975051983178713710605106379555858890556885288798908475091576463907469361988150781468526213325247383765119299015610918977792200870579339646382749068069876916819749236562422608715417610043060890437797667851966189140414492527048088197149880154205778700652159400928977760133075684796699295543365613984773806039436889588764605498387147896848280538470173087111776115966350503997934386933911978988710915654170913308260764740630571141109883938809548143782847452883836807941888434266622207043872288741394780101772139228191199236540551639589347426395382482960903690028835932774585506080131798840716244656399794827578365019551422155133928197822698427863839167971509126241054872570092407004548848569295044811073808799654748156891393538094347455697212891982717702076661360248958146811913361412125878389557735719498631721084439890142394849665925173138817160266326193106536653504147307080441493916936326237376777709585031325599009576273195730864804246770121232702053374266705314244820816813030639737873664248367253983748769098060218278578621651273856351329014890350988327061725893257536399397905572917516009761545904477169226580631511102803843601737474215247608515209901615858231257159073342173657626714239047827958728150509563309280266845893764964977023297364131906098274063353108979246424213458374090116939196425045912881340349881063540088759682005440836438651661788055760895689672753153808194207733259791727843762566118431989102500749182908647514979400316070384554946538594602745244746681231468794344161099333890899263841184742525704457251745932573898956518571657596148126602031079762825416559
050604247911401695790033835657486925280074302562341949828646791447632277400552946090394017753633565547193100017543004750471914489984104001586794617924161001645471655133707407395026044276953855383439755054887109978520540117516974758134492607943368954378322117245068734423198987884412854206474280973562580706698310697993526069339213568588139121480735472846322778490808700246777630360555123238665629517885371967303463470122293958160679250915321748903084088651606111901149844341235012464692802880599613428351188471544977127847336176628506216977871774382436256571177945006447771837022199910669502165675764404499794076503799995484500271066598781360380231412683690578319046079276529727769404361302305178708054651154246939526512710105292707030667302444712597393995051462840476743136373997825918454117641332790646063658415292701903027601733947486696034869497654175242930604072700505903950314852292139257559484507886797792525393176515641619716844352436979444735596426063339105512682606159572621703669850647328126672452198906054988028078288142979633669674412480598219214633956574572210229867759974673812606936706913408155941201611596019023775352555630060624798326124988128819293734347686268921923977783391073310658825681377717232831532908252509273304785072497713944833389255208117560845296659055394096556854170600117985729381399825831929367910039184409928657560599359891000296986446097471471847010153128376263114677420914557404181590880006494323785583930853082830547607679952435739163122188605754967383224319565065546085288120190236364471270374863442172725787950342848631294491631847534753143504139209610879605773098720135248407505763719925365047090858251393686346386336804289176710760211115982887553994012007601394703366179371539630613986365549221374159790511908358829009765664730073387931467891318146510931676157582135142486044229244530411316065270097433008849903467540551864067734260358340960860553374736276093565885310976099423834738222208729246449768456057956251676557408841032173134562773585605235823638953203853402484227337163912397321599544082842166663602329654569470357718487344203422770665383738750616921276801576618109542009770836360436111059240911788954033802142652394892968643980892611463541457153519434285072135345301831587562827573389826889852355779929572764522939156747756667605108788764845349363606827805056462281359888587925994094644604170520447004631513797543173718775603981596264750141090665886616218003826698996196558058720863972117699521946678985701179833244060181157565807428418291061519391763005919431443460515404771057005433900018245311773371895585760360718286050635647997900413976180895536366960316219311325022385179167205518065926351803625121457592623836934822266589557699466049193811248660909979812857182349400661555219611220720309227764620099931524427358948871057662389469388944649509396033045434084210246240104872332875008174917987554387938738143989423801176270083719605309438394006375611645856094312951759771393539607432279248922126704580818331376416581826956210587289244774003594700926866265965142205063007859200248829186083974373235384908396432614700053242354064704208949921025040472678105908364400746638002087012666420945718170294675227854007450855237772089058168391844659282941701828823301497155423523591177481862859296760504820386434310877956289292540563894662194826871104282816389397571175778691543016505860296521745958198887868040811032843273986719862130620555985526603640504628215230615459447448990883908199973874745296981077620148713400012253552224669540931521311533791579802697955571050850747387475075806876537644578252443263
804614304288923593485296105826938210349800040524840708440356116781717051281337880570564345061611933042444079826037795119854869455915205196009304127100727784930155503889536033826192934379708187432094991415959339636811062755729527800425486306005452383915106899891357882001941178653568214911852820785213012551851849371150342215954224451190020739353962740020811046553020793286725474054365271759589350071633607632161472581540764205302004534018357233829266191530835409512022632916505442612361919705161383935732669376015691442994494374485680977569630312958871916112929468188493633864739274760122696415884890096571708616059814720446742866420876533479985822209061980217321161423041947775499073873856794118982466091309169177227420723336763503267834058630193019324299639720444517928812285447821195353089891012534297552472763573022628138209180743974867145359077863353016082155991131414420509144729353502223081719366350934686585865631485557586244781862010871188976065296989926932817870557643514338206014107732926106343152533718224338526352021773544071528189813769875515757454693972715048846979361950047772097056179391382898984532742622728864710888327017372325881824465843624958059256033810521560620615571329915608489206434030339526226345145428367869828807425142256745180618414956468611163540497189768215422772247947403357152743681940989205011365340012384671429655186734415374161504256325671343024765512521921803578016924032669954174608759240920700466934039651017813485783569444076047023254075555776472845075182689041829396611331016013111907739863246277821902365066037404160672496249013743321724645409741299557052914243820807609836482346597388669134991978401310801558134397919485283043673901248208244481412809544377389832005986490915950532285791457688496257866588599917986752055455809900455646117875524937012455321717019428288461740273664997847550829422802023290122163010230977215156944642790980219082668986883426307160920791408519769523555348865774342527753119724743087304361951139611908003025587838764420608504473063129927788894272918972716989057592524467966018970748296094919064876469370275077386643239191904225429023531892337729316673608699622803255718530891928440380507103006477684786324319100022392978525537237556621364474009676053943983823576460699246526008909062410590421545392790441152958034533450025624410100635953003959886446616959562635187806068851372346270799732723313469397145628554261546765063246567662027924520858134771760852169134094652030767339184114750414016892412131982688156866456148538028753933116023229255561894104299533564009578649534093511526645402441877594931693056044868642086275720117231952640502309977456764783848897346431721598062678767183800524769688408498918508614900343240347674268624595239589035858213500645099817824463608731775437885967767291952611121385919472545140030118050343787527766440276261894101757687268042817662386068047788524288743025914524707395054652513533945959878961977891104189029294381856720507096460626354173294464957661265195349570186001541262396228641389779673332907056737696215649818450684226369036784955597002607986799626101903933126376855696876702929537116252800554310078640872893922571451248113577862766490242516199027747109033593330930494838059785662884478744146984149906712376478958226329490467981208998485716357108783119184863025450162092980582920833481363840542172005612198935366937133673339246441612522319694347120641737549121635700857369439730597970971972666664226743111776217640306868131035189911227133972403688700099686292254646500638528862039380050477827691283560337254825579391298525150682996910775425
764748832534141213280062671709400909822352965795799780301828242849022147074811112401860761341515038756983091865278065889668236252393784527263453042041880250844236319038331838455052236799235775292910692504326144695010986108889991465855188187358252816430252093928525807796973762084563748211443398816271003170315133440230952635192958868069082135585368016100021374085115448491268584126869589917414913382057849280069825519574020181810564129725083607035685105533178784082900004155251186577945396331753853209214972052660783126028196116485809868458752512999740409279768317663991465538610893758795221497173172813151793290443112181587102351874075722210012376872194474720934931232410706508061856237252673254073332487575448296757345001932190219911996079798937338367324257610393898534927877747398050808001554476406105352220232540944356771879456543040673589649101761077594836454082348613025471847648518957583667439979150851285802060782055446299172320202822291488695939972997429747115537185892423849385585859540743810488262464878805330427146301194158989632879267832732245610385219701113046658710050008328517731177648973523092666123458887310288351562644602367199664455472760831011878838915114934093934475007302585581475619088139875235781233134227986650352272536717123075686104500454897036007956982762639234410714658489578024140815840522953693749971066559489445924628661996355635065262340533943914211127181069105229002465742360413009369188925586578466846121567955425660541600507127664176605687427420032957716064344860620123982169827172319782681662824993871499544913730205184366907672357740005393266262276032365975171892590180110429038427418550789488743883270306328327996300720069801224436511639408692222074532024462412115580435454206421512158505689615735641431306888344318528085397592773443365538418834030351782294625370201578215737326552318576355409895403323638231921989217117744946940367829618592080340386757583411151882417743914507736638407188048935825686854201164503135763335550944031923672034865101056104987272647213198654343545040913185951314518127643731043897250700498198705217627249406521461995923214231443977654670835171474936798618655279171582408065106379950018429593879915835017158075988378496225739851212981032637937621832245659423668537679911314010804313973233544909082491049914332584329882103398469814171575601082970658306521134707680368069532297199059990445120908727577622535104090239288877942463048328031913271049547859918019696783532146444118926063152661816744319355081708187547705080265402529410921826485821385752668815558411319856002213515888721036569608751506318753300294211868222189377554602722729129050429225978771066787384000061677215463844129237119352182849982435092089180168557279815642185819119749098573057033266764646072875743056537260276898237325974508447964954564803077159815395582777913937360171742299602735310276871944944491793978514463159731443535185049141394155732938204854212350817391254974981930871439661513294204591938010623142177419918406018034794988769105155790555480695387854006645337598186284641990522045280330626369562649091082762711590385699505124652999606285544383833032763859980079292284665950355121124528408751622906026201185777531374794936205549640107300134885315073548735390560290893352640071327473262196031177343394367338575912450814933573691166454128178817145402305475066713651825828489809951213919399563324133655677709800308191027204099714868741813466700609405102146269028044915964654533010775469541308871416531254481306119240782118869005602778182423502269618934435254763357353648561936325441775661398170393063287216690572
225974520919291726219984440964615826945638023950283712168644656178523556516412771282691868861557271620147493405227694659571219831494338162211400693630743044417328478610177774383797703723179525543410722344551255558999864618387676490397246116795901810003509892864120419516355110876320426761297982652942588295114127584126273279079880755975185157684126474220947972184330935297266521001566251455299474512763155091763673025946213293019040283795424632325855030109670692272022707486341900543830265068121414213505715417505750863990767394633514620908288893493837643939925690060406731142209331219593620298297235116325938677224147791162957278075239505625158160313335938231150051862689053065836812998810866326327198061127154885879809348791291370749823057592909186293919501472119758606727009254771802575033773079939713453953264619526999659638565491759045833358579910201271320458390320085387888163363768518208372788513117522776960978796214237216254521459128183179821604411131167140691482717098101545778193920231156387195080502467972579249760577262591332855972637121120190572077140914864507409492671803581515757151405039761096384675556929897038354731410022380258346876735012977541327953206097115450648421218593649099791776687477448188287063231551586503289816422828823274686610659273219790716238464215348985247621678905026099804526648392954235728734397768049577409144953839157556548545905897649519851380100795801078375994577529919670054760225255203445398871253878017196071816407812484784725791240782454436168234523957068951427226975043187363326301110305342333582160933319121880660826834142891041517324721605335584999322454873077882290525232423486153152097693846104258284971496347534183756200301491570327968530186863157248840152663983568956363465743532178349319982554211730846774529708583950761645822963032442432823773745051702856069806788952176819815671078163340526675953942492628075696832610749532339053622309080708145591983735537774874202903901814293731152933464446815121294509759653430628421531944572711861490001765055817709530246887526325011970520947615941676872778447200019278913725184162285778379228443908430118112149636642465903363419454065718354477191244662125939265662030688852005559912123536371822692253178145879259375044144893398160865790087616502463519704582889548179375668104647461410514249887025213993687050937230544773411264135489280684105910771667782123833281026218558775131272117934444820144042574508306394473836379390628300897330624138061458941422769474793166571762318247216835067807648757342049155762821758397297513447899069658953254894033561561316740327647246921250575911625152965456854463349811431767025729566184477548746937846423373723898192066204851189437886822480727935202250179654534375727416391079197295295081294292220534771730418447791567399173841831171036252439571615271466900581470000263301045264354786590329073320546833887207873544476264792529769017091200787418373673508771337697768349634425241994995138831507487753743384945825976556099655595431804092017849718468549737069621208852437701385375768141663272241263442398215294164537800049250726276515078908507126599703670872669276430837722968598516912230503746274431085293430527307886528397733524601746352770320593817912539691562106363762588293757137384075440646896478310070458061344673127159119460843593582598778283526653115106504162329532904777217408355934972375855213804830509000964667608830154061282430874064559443185341375522016630581211103345312074508682433943215904359443031243122747138584203039010607094031523555617276799416002039397509989762933532585557562480899669182986422267750236019
325797472674257821111973470940235745722227121252685238429587427350156366009318804549333898974157149054418255973808087156528143010267046028431681923039253529779576586241439270154974087927313105163611913757700892956482332364829826302460797587576774537716010249080462430185652416175665560016085912153455626760219268998285537787258314514408265458348440947846317877737479465358016996077940556870119232860804113090462935087182712593466871276669487389982459852778649956916546402945893506496433580982476596516514209098675520380830920323048734270346828875160407154665383461961122301375945157925269674364253192739003603860823645076269882749761872357547676288995075211480485252795084503395857083813047693788132112367428131948795022806632017002246033198967197064916374117585485187848401205484467258885140156272501982171906696081262778548596481836962141072171421498636191877475450965030895709947093433785698167446582826791194061195603784539785583924076127634410576675102430755981455278616781594965706255975507430652108530159790807334373607943286675789053348366955548680391343372015649883422089339997164147974693869690548008919306713805717150585730714881564992071408675825960287605645978242377024246980532805663278704192676846711626687946348695046450742021937394525926266861355294062478136120620263649819999949840514386828525895634226432870766329930489172340072547176418868535137233266787792173834754148002280339299735793615241275582956927683723123479898944627433045456679006203242051639628258844308543830720149567210646053323853720314324211260742448584509458049408182092763914000854042202355626021856434899414543995041098059181794888262805206644108631900168856815516922948620301073889718100770929059048074909242714101893354281842999598816966099383696164438152887721408526808875748829325873580990567075581701794916190611400190855374488272620093668560447559655747648567400817738170330738030547697360978654385938218722058390234444350886749986650604064587434600533182743629617786251808189314436325120510709469081358644051922951293245007883339878842933934243512634336520438581291283434529730865290978330067126179813031679438553572629699874035957045845223085639009891317947594875212639707837594486113945196028675121056163897600888009274611586080020780334159145179707303683519697776607637378533301202412011204698860920933908536577322239241244905153278095095586645947763448226998607481329730263097502881210351772312446509534965369309001863776409409434983731325132186208021480992268550294845466181471555744470966953017769043427203189277060471778452793916047228153437980353967986142437095668322149146543801459382927739339603275404800955223181666738035718393275707714204672383862461780397629237713120958078936384144792980258806552212926209362393063731349664018661951081158347117331202580586672763999276357907806381881306915636627412543125958993611964762610140556350339952314032311381965623632719896183725484533370206256346422395276694356837676136871196292181875457608161705303159072882870071231366630872275491866139577373054606599743781098764980241401124214277366808275139095931340415582626678951084677611866595766016599817808941498575497628438785610026379654317831363402513581416115190209649913354873313111502270068193013592959597164019719605362503355847998096348871803911161281359596856547886832585643789617315976200241962155289629790481982219946226948713746244472909345647002853769495885959160678928249105441251599630078136836749020937491573289627002865682934443134234735123929825916673950342599586897069726733258273590312128874666045146148785034614282776599160809039865257571
726308183349444182019353338507129234577437557934406217871133006310600332405399169368260374617663856575887758020122936635327026710068126182517291460820254189288593524449107013820621155382779356529691457650204864328286555793470720963480737269214118689546732276775133569019015372366903686538916129168888787640752549349424973342718117889275993159671935475898809792452526236365903632007085444078454479734829180208204492667063442043755532505052752283377888704080403353192340768563010934777212563908864041310107381785333831603813528082811904083256440184205374679299262203769871801806112262449090924264198582086175117711378905160914038157500336642415609521632819712233502316742260056794128140621721964184270578432895980288233505982820819666624903585778994033315227481777695284368163008853176969478369058067106482808359804669884109813515865490693331952239436328792399053481098783027450017206543369906611778455436468772363184446476806914282800455107468664539280539940910875493916609573161971503316696830992946634914279878084225722069714887558063748030886299511847318712477729191007022758889348693945628951580296537215040960310776128983126358996489341024703603664505868728758905140684123812424738638542790828273382797332688550493587430316027474906312957234974261122151741715313361862241091386950068883589896234927631731647834007746088665559873338211382992877691149549218419208777160606847287467368188616750722101726110383067178785669481294878504894306308616994879870316051588410828235127415353851336589533294862949449506186851477910580469603906937266267038651290520113781085861618888694795760741358553458515176805197333443349523012039577073962377131603024288720053732099825300897761897312981788194467173116064723147624845755192873278282512718244680782421521646956781929409823892628494376024885227900362021938669648221562809360537317804086372726842669642192994681921490870170753336109479138180406328738759384826953558307739576144799727000347288018278528138950321798634521611106660883931405322694490545552786789441757920244002145078019209980446138254780585804844241640477503153605490659143007815837243012313751156228401583864427089071828481675752712384678245953433444962201009607105137060846180118754312072549133499424761711563332140893460915656155060031738421870157022610310191660388706466143889773631878094071152752817468957640158104701696524755774089164456867771715850058326994340167720215676772406812836656526412298243946513319735919970940327593850266955747023181320324371642058614103360652453693916005064495306016126782264894243739716671766123104897503188573216555498834212180284691252908610148552781527762562375045637576949773433684601560772703550962904939248708840628106794362241870474700836884267102255830240359984164595112248527263363264511401739524808619463584078375355688562231711552094722306543709260679735100056554938122457548372854571179739361575616764169289580525729752233855861138832217110736226581621884244317885748879810902665379342666421699091405653643224930133486798815488662866505234699723557473842483059042367714327879231642240387776433019260019228477831383763253612102533693581262408686669973827597736568222790721583247888864236934639616436330873013981421143030600873066616480367898409133592629340230432497492688783164360268101130957071614191283068657732353263965367739031766136131596555358499939860056515592193675997771793301974468814837110320650369319289452140265091546518430993655349333718342529843367991593941746622390038952767381333061774762957494386871697845376721949350659087571191772087547710718993796089477451265475750187119487073873678
589020061737332107569330221632062843206567119209695058576117396163232621770894542621460985841023781321581772760222273813349541048100307327510779994899197796388353073444345753297591426376840544226478421606312276964696715647399904371590332390656072664411643860540483884716191210900870101913072607104411414324197679682854788552477947648180295973604943970047959604029274629920357209976195014034831538094771460105633344699882082212058728151072918297121191787642488035467231691654185225672923442918712816323259696541354858957713320833991128877591722611527337901034136208561457799239877832508355073019981845902595835598926055329967377049172245493532968330000223018151722657578752405883224908582128008974790932610076257877042865600699617621217684547899644070506624171021332748679623743022915535820078014116534806564748823061500339206898379476625503654982280532966286211793062843017049240230198571997894883689718304380518217441914766042975243725168343541121703863137941142209529588579806015293875275379903093887168357209576071522190027937929278630363726876582268124199338480816602160372215471014300737753779269906958712128928801905203160128586182549441335382078488346531163265040764242839087012101519423196165226842200371123046430067344206474771802135307012409886035339915266792387110170622186588357378121093517977560442563469499978725112544085452227481091487430725986960204027594117894258128188215995235965897918114407765335432175759525553615812800116384672031934650729680799079396371496177431211940202129757312516525376801735910155733815377200195244454362007184847566341540744232862106099761324348754884743453966598133871746609302053507027195298394327142537115576660002578442303107342955153394506048622276496668762407932435319299263925373107689213535257232108088981933916866827894828117047262450194840970097576092098372409007471797334078814182519584259809624174761013825264395513525931188504563626418830033853965243599741693132289471987830842760040136807470390409723847394583489618653979059411859931035616843686921948538205578039577388136067954990008512325944252972448666676683464140218991594456530942344065066785194841776677947047204195882204329538032631053749488312218039127967844610013972675389219511911783658766252808369005324900459741094706877291232821430463533728351995364827432583311914445901780960778288358373011185754365995898272453192531058811502630754257149394302445393187017992360816661130542625399583389794297160207033876781503301028012009599725222228080142357109476035192554443492998676781789104555906301595380976187592035893734197896235893112598390259831026719330418921510968915622506965911982832345550305908173073519550372166587028805399213857603703537710517802128012956684198414036287272562321442875430221090947272107347413497551419073704331827662617727599688882602722524713368335345281669277959132886138176634985772893690096574956228710302436259077241221909430087175569262575806570991201665962243608024287002454736203639484125595488172727247365346778364720191830399871762703751572464992228946793232269361917764161461879561395669956778306829031658969943076733350823499079062410020250613405734430069574547468217569044165154063658468046369262127421107539904218871612761778701425886482577522388918459952337629237791558574454947736129552595222657863646211837759847370034797140820699414558071908021359073226923310083175951065901912129479540860364075735875020589020870457967000705526250581142066390745921527330940682364944159089100922029668052332526619891131184201629163107689408472356436680818216865721968826835840278550078280404345371018365109695178
233574303050485265373807353107418591770561039739506264035544227515610110726177937063472380499066692216197119425912044508464174638358993823994651739550900085947999013602667426149429006646711506717542217703877450767356374215478290591101261915755587023895700140511782264698994491790830179547587676016809410013583761357859135692445564776446417866711539195135769610486492249008344671548638305447791433009768048687834818467273375843689272431044740680768527862558516509208826381323362314873333671476452045087662761495038994950480956046098960432912335834885999029452640028499428087862403981181488476730121675416110662999555366819312328742570206373835202008686369131173346973174121915363324674532563087134730279217495622701468732586789173455837996435135880095935087755635624881049385299900767513551352779241242927748856588856651324730251471021057535251651181485090275047684551825209633189906852761443513821366215236889057878669943228881602837748203550601602989400911971385017987168363374413927597364401700701476370665570350433812111357641501845182141361982349515960106475271257593518530433287553778305750956742544268471221961870917856078393614451138333564910325640573389866717812397223751931643061701385953947436784339267098671245221118969084023632741149660124348309892994173803058841716661307304006758838043211155537944060549772170594282151488616567277124090338772774562909711013488518437411869565544974573684521806698291104505800429988795389902780438359628240942186055628778842880212755388480372864001944161425749990427200959520465417059810498996750451193647117277222043610261407975080968697517660023718774834801612031023468056711264476612374762785219024120256994353471622666089367521983311181351114650385489502512065577263614547360442685949807439693233129712737715734709971395229118265348515558713733662912024271430250376326950135091161295299378586468130722648600827088133353819370368259886789332123832705329762585738279009782646054559855513183668884462826513379849166783940976135376625179825824966345877195012438404035914084920973375464247448817618407002356958017741017769692507781489338667255789856458985105689196092439884156928069698335224022563457049731224526935419383700484318335719651662672157552419340193309901831930919658292096965624766768365964701959575473934551433741370876151732367720422738567427917069820454995309591887243493952409444167899884631984550485239366297207977745281439941825678945779571255242682608994086331737153889626288962940211210888442737656862452761213037101730078513571540453304150795944777614359743780374243664697324713841049212431413890357909241603640631403814983148190525172093710396402680899483257229795456404270175772290417323479607361878788991331830584306939482596131871381642346721873084513387721908697510494284376932502498165667381626061594176825250999374167288395174406693254965340310145222531618900923537648637848288134420987004809622717122640748957193900291857330746010436072919094576799461492929042798168772942648772995285843464777538690695014898413392454039414468026362540211861431703125111757764282991464453340892097696169909837265236176874560589470496817013697490952307208268288789073019001825342580534342170592871393173799314241085264739094828459641809361413847583113613057610846236683723769591349261582451622155213487924414504175684806412063652017038633012953277769902311864802006755690568229501635493199230591424639621702532974757311409422018019936803502649563695586642590676268568737211033915679383989576556519317788300024161353956243777784080174881937309502069990089089932808839743036773659552489130015663329407
790713961546453408879151030065132193448667324827590794680787981942501958262232039513125201410996053126069655540424867054998678692302174698900954785072567297879476988883109348746442640071818316033165551153427615562240547447337804924621495213325852769884733626918264917433898782478927846891882805466998230368993978341374758702580571634941356843392939606819206177333179173820856243643363535986349449689078106401967407443658366707158692452118299789380407713750129085864657890577142683358276897855471768718442772612050926648610205153564284063236848180728794071712796682006072755955590404023317874944734645476062818954151213916291844429765106694796935401686601005519607768733539651161493093757096855455938151378956903925101495326562814701199832699220006639287537471313523642158926512620407288771657835840521964605410543544364216656224456504299901025658692727914275293117208279393775132610605288123537345106837293989358087124386938593438917571337630072031976081660446468393772580690923729752348670291691042636926209019960520412102407764819031601408586355842760953708655816427399534934654631450404019952853725200495780525465625115410925243799132626271360909940290226206283675213230506518393405745011209934146491843332364656937172591448932415900624202061288573292613359680872650004562828455757459659212053034131011182750130696150983551563200431078460190656549380654252522916199181995960275232770224985573882489988270746593635576858256051806896428537685077201222034792099393617926820659014216561592530673794456894907085326356819683186177226824991147261573203580764629811624401331673789278868922903259334986179702199498192573961767307583441709855922217017182571277753449150820527843090461946083521740200583867284970941102326695392144546106621500641067474020700918991195137646690448126725369153716229079138540393756007783515337416774794210038400230895185099454877903934612222086506016050035177626483161115332558770507354127924990985937347378708119425305512143697974991495186053592040383023571635272763087469321962219006426088618367610334600225547747781364101269190656968649501268837629690723396127628722304114181361006026404403003599698891994582739762411461374480405969706257676472376606554161857469052722923822827518679915698339074767114610302277660602006124687647772881909679161335401988140275799217416767879923160396356949285151363364721954061117176738737255572852294005436178517650230754469386930787349911035218253292972604455321079788771144989887091151123725060423875373484125708606406905205845212275453384800820530245045651766951857691320004281675805492481178051983264603244579282973012910531838563682120621553128866856495651261389226136706409395333457052698695969235035309422454386527867767302754040270224638448355323991475136344104405009233036127149608135549053153902100229959575658370538126196568314428605795669662215472169562087001372776853696084070483332513279311223250714863020695124539500373572334680709465648308920980153487870563349109236605755405086411152144148143463043727327104502776866195310785832333485784029716092521532609255893265560067212435946425506599677177038844539618163287961446081778927217183690888012677820743010642252463480745430047649288555340906218515365435547412547615276977266776977277705831580141218568801170502836527554321480348800444297999806215790456416195721278450892848980642649742709057912906921780729876947797511244730599140605062994689428093103421641662993561482813099887074529271604843363081840412646963792584309418544221635908457614607855856247381493142707826621518554160387020687698046174740080832434366538235455510944949
843109349475994467267366535251766270677219418319197719637801570216993367508376005716345464367177672338758864340564487156696432104128259564534984138841289042068204700761559691684303899934836679354254921032811336318472259230555438305820694167562999201337317548912203723034907268106853445403599356182357631283776764063101312533521214199461186935083317658785204711236433122676512996417132521751355326186768194233879036546890800182713528358488844411176123410117991870923650718485785622102110400977699445312179502247957806950653296594038398736990724079767904082679400761872954783596349279390457697366164340535979221928587057495748169669406233427261973351813662606373598257555249650980726012366828360592834185584802695841377255897088378994291054980033111388460340193916612218669605849157148573356828614950001909759112521880039641976216355937574371801148055944229873041819680808564726571354761283162920044988031540210553059707666636274932830891688093235929008178741198573831719261672883491840242972129043496552694272640255964146352591434840067586769035038232057293413298159353304444649682944136732344215838076169483121933311981906109614295220153617029857510559432646146850545268497576480780800922133581137819774927176854507553832876887447459159373116247060109124460982942484128752022446259447763874949199784044682925736096853454984326653686284448936570411181779380644161653122360021491876876946739840751717630751684985635920148689294310594020245796962292456664488196757629434953532638217161339575779076637076456957025973880043841580589433613710655185998760075492418721171488929522173772114608115434498266547987258005667472405112200738345927157572771521858994694811794064446639943237004429114074721818022482583773601734668530074498556471542003612359339731291445859152288740871950870863221883728826282288463184371726190330577714765156414382230679184738603914768310814135827575585364359772165002827780371342286968878734979509603110889919614338666406845069742078770028050936720338723262963785603865321643234881555755701846908907464787912243637555666867806761054495501726079114293083128576125448194444947324481909379536900820638463167822506480953181040657025432760438570350592281891987806586541218429921727372095510324225107971807783304260908679427342895573555925272380551144043800123904168771644518022649168164192740110645162243110170005669112173318942340054795968466980429801736257040673328212996215368488140410219446342464622074557564396045298531307140908460849965376780379320189914086581466217531933766597011433060862500982956691763884605676297293146491149370462446935198403953444913514119366793330193661766365255514917498230798707228086085962611266050428929696653565251668888557211227680277274370891738963977225756489053340103885593112567999151658902501648696142720700591605616615970245198905183296927893555030393468121976158218398048396056252309146263844738629603984892438618729850777592879272206855480721049781765328621018747676689724884113956034948037672703631692100735083407386526168450748249644859742813493648037242611670426687083192504099761531907685577032742178501000644198412420739640013960360158381056592841368457411910273642027416372348821452410134771652960312840865841978795111651152982781462037913985500639996032659124852530849369031313010079997719136223086601109992914287124938854161203802041134018888721969347790449752745428807280350930582875442075513481666092787935356652125562013998824962847872621443236285367650259145046837763528258765213915648097214192967554938437558260025316853635673137926247587804944594418342917275698837622626184636545274349
8181540049187772287869801853454537006526655649170915429522756709222217474112062720656622989806032891672068743654948246108697367225547404812889242471854323605753411672850757552057131156697954584887398742228135887985840783135060548290551482785294891121905383195624228719484759407859398047901094194070671764439032730712135887385049993638838205501683402777496070276844880281912220636888636811043569529300652195528261526991271637277388418993287130563464688227398288763198645709836308917786487086676185485680047672552675414742851028145807403152992197814557756843681110185317498167016426647884090262682824448258027532094549915104518517716546311804904567985713257528117913656278158111288816562285876030875974963849435275676612168959261485030785362045274507752950631012480341804584059432926079854435620093708091821523920371790678121992280496069738238743312626730306795943960954957189577217915597300588693646845576676092450906088202212235719254536715191834872587423919410890444115959932760044506556206461164655665487594247369252336955993030355095817626176231849561906494839673002037763874369343999829430209147073618947932692762445186560239559053705128978163455423320114975994896278424327483788032701418676952621180975006405149755889650293004867605208010491537885413909424531691719987628941277221129464568294860281493181560249677887949813777216229359437811004448060797672429276249510784153446429150842764520002042769470698041775832209097020291657347251582904630910359037842977572651720877244740952267166306005469716387943171196873484688738186656751279298575016363411314627530499019135646823804329970695770150789337728658035712790913767420805655493624646 diff --git a/vendor/github.com/klauspost/compress/testdata/pngdata.bin b/vendor/github.com/klauspost/compress/testdata/pngdata.bin new file mode 100644 index 00000000000..f48a75c903d Binary files /dev/null and b/vendor/github.com/klauspost/compress/testdata/pngdata.bin differ diff --git a/vendor/github.com/klauspost/compress/testdata/sharnd.out b/vendor/github.com/klauspost/compress/testdata/sharnd.out new file mode 100644 index 00000000000..b474465e04a Binary files /dev/null and b/vendor/github.com/klauspost/compress/testdata/sharnd.out differ diff --git a/vendor/github.com/klauspost/compress/zip/example_test.go b/vendor/github.com/klauspost/compress/zip/example_test.go new file mode 100644 index 00000000000..8527b8e2eba --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/example_test.go @@ -0,0 +1,105 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip_test + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/compress/zip" +) + +func ExampleWriter() { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new zip archive. + w := zip.NewWriter(buf) + + // Add some files to the archive. + var files = []struct { + Name, Body string + }{ + {"readme.txt", "This archive contains some text files."}, + {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, + {"todo.txt", "Get animal handling licence.\nWrite more examples."}, + } + for _, file := range files { + f, err := w.Create(file.Name) + if err != nil { + log.Fatal(err) + } + _, err = f.Write([]byte(file.Body)) + if err != nil { + log.Fatal(err) + } + } + + // Make sure to check the error on Close. 
+ err := w.Close() + if err != nil { + log.Fatal(err) + } +} + +func ExampleReader() { + // Open a zip archive for reading. + r, err := zip.OpenReader("testdata/readme.zip") + if err != nil { + log.Fatal(err) + } + defer r.Close() + + // Iterate through the files in the archive, + // printing some of their contents. + for _, f := range r.File { + fmt.Printf("Contents of %s:\n", f.Name) + rc, err := f.Open() + if err != nil { + log.Fatal(err) + } + _, err = io.CopyN(os.Stdout, rc, 68) + if err != nil { + log.Fatal(err) + } + rc.Close() + fmt.Println() + } + // Output: + // Contents of README: + // This is the source code repository for the Go programming language. +} + +func ExampleWriter_RegisterCompressor() { + // Override the default Deflate compressor with a higher compression + // level. + + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new zip archive. + w := zip.NewWriter(buf) + + var fw *flate.Writer + + // Register the deflator. + w.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) { + var err error + if fw == nil { + // Creating a flate compressor for every file is + // expensive, create one and reuse it. + fw, err = flate.NewWriter(out, flate.BestCompression) + } else { + fw.Reset(out) + } + return fw, err + }) + + // Proceed to add files to w. +} diff --git a/vendor/github.com/klauspost/compress/zip/reader.go b/vendor/github.com/klauspost/compress/zip/reader.go new file mode 100644 index 00000000000..9a0e20db1e1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/reader.go @@ -0,0 +1,520 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "os" +) + +var ( + ErrFormat = errors.New("zip: not a valid zip file") + ErrAlgorithm = errors.New("zip: unsupported compression algorithm") + ErrChecksum = errors.New("zip: checksum error") +) + +type Reader struct { + r io.ReaderAt + File []*File + Comment string + decompressors map[uint16]Decompressor +} + +type ReadCloser struct { + f *os.File + Reader +} + +type File struct { + FileHeader + zip *Reader + zipr io.ReaderAt + zipsize int64 + headerOffset int64 +} + +func (f *File) hasDataDescriptor() bool { + return f.Flags&0x8 != 0 +} + +// OpenReader will open the Zip file specified by name and return a ReadCloser. +func OpenReader(name string) (*ReadCloser, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + r := new(ReadCloser) + if err := r.init(f, fi.Size()); err != nil { + f.Close() + return nil, err + } + r.f = f + return r, nil +} + +// NewReader returns a new Reader reading from r, which is assumed to +// have the given size in bytes. 
+func NewReader(r io.ReaderAt, size int64) (*Reader, error) { + zr := new(Reader) + if err := zr.init(r, size); err != nil { + return nil, err + } + return zr, nil +} + +func (z *Reader) init(r io.ReaderAt, size int64) error { + end, err := readDirectoryEnd(r, size) + if err != nil { + return err + } + if end.directoryRecords > uint64(size)/fileHeaderLen { + return fmt.Errorf("archive/zip: TOC declares impossible %d files in %d byte zip", end.directoryRecords, size) + } + z.r = r + z.File = make([]*File, 0, end.directoryRecords) + z.Comment = end.comment + rs := io.NewSectionReader(r, 0, size) + if _, err = rs.Seek(int64(end.directoryOffset), os.SEEK_SET); err != nil { + return err + } + buf := bufio.NewReader(rs) + + // The count of files inside a zip is truncated to fit in a uint16. + // Gloss over this by reading headers until we encounter + // a bad one, and then only report a ErrFormat or UnexpectedEOF if + // the file count modulo 65536 is incorrect. + for { + f := &File{zip: z, zipr: r, zipsize: size} + err = readDirectoryHeader(f, buf) + if err == ErrFormat || err == io.ErrUnexpectedEOF { + break + } + if err != nil { + return err + } + z.File = append(z.File, f) + } + if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here + // Return the readDirectoryHeader error if we read + // the wrong number of directory entries. + return err + } + return nil +} + +// RegisterDecompressor registers or overrides a custom decompressor for a +// specific method ID. If a decompressor for a given method is not found, +// Reader will default to looking up the decompressor at the package level. +// +// Must not be called concurrently with Open on any Files in the Reader. +func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { + if z.decompressors == nil { + z.decompressors = make(map[uint16]Decompressor) + } + z.decompressors[method] = dcomp +} + +func (z *Reader) decompressor(method uint16) Decompressor { + dcomp := z.decompressors[method] + if dcomp == nil { + dcomp = decompressor(method) + } + return dcomp +} + +// Close closes the Zip file, rendering it unusable for I/O. +func (rc *ReadCloser) Close() error { + return rc.f.Close() +} + +// DataOffset returns the offset of the file's possibly-compressed +// data, relative to the beginning of the zip file. +// +// Most callers should instead use Open, which transparently +// decompresses data and verifies checksums. +func (f *File) DataOffset() (offset int64, err error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return + } + return f.headerOffset + bodyOffset, nil +} + +// Open returns a ReadCloser that provides access to the File's contents. +// Multiple files may be read concurrently. 
+func (f *File) Open() (rc io.ReadCloser, err error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return + } + size := int64(f.CompressedSize64) + r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size) + dcomp := f.zip.decompressor(f.Method) + if dcomp == nil { + err = ErrAlgorithm + return + } + rc = dcomp(r) + var desr io.Reader + if f.hasDataDescriptor() { + desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen) + } + rc = &checksumReader{ + rc: rc, + hash: crc32.NewIEEE(), + f: f, + desr: desr, + } + return +} + +type checksumReader struct { + rc io.ReadCloser + hash hash.Hash32 + nread uint64 // number of bytes read so far + f *File + desr io.Reader // if non-nil, where to read the data descriptor + err error // sticky error +} + +func (r *checksumReader) Read(b []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + n, err = r.rc.Read(b) + r.hash.Write(b[:n]) + r.nread += uint64(n) + if err == nil { + return + } + if err == io.EOF { + if r.nread != r.f.UncompressedSize64 { + return 0, io.ErrUnexpectedEOF + } + if r.desr != nil { + if err1 := readDataDescriptor(r.desr, r.f); err1 != nil { + if err1 == io.EOF { + err = io.ErrUnexpectedEOF + } else { + err = err1 + } + } else if r.hash.Sum32() != r.f.CRC32 { + err = ErrChecksum + } + } else { + // If there's not a data descriptor, we still compare + // the CRC32 of what we've read against the file header + // or TOC's CRC32, if it seems like it was set. + if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 { + err = ErrChecksum + } + } + } + r.err = err + return +} + +func (r *checksumReader) Close() error { return r.rc.Close() } + +// findBodyOffset does the minimum work to verify the file has a header +// and returns the file body offset. +func (f *File) findBodyOffset() (int64, error) { + var buf [fileHeaderLen]byte + if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil { + return 0, err + } + b := readBuf(buf[:]) + if sig := b.uint32(); sig != fileHeaderSignature { + return 0, ErrFormat + } + b = b[22:] // skip over most of the header + filenameLen := int(b.uint16()) + extraLen := int(b.uint16()) + return int64(fileHeaderLen + filenameLen + extraLen), nil +} + +// readDirectoryHeader attempts to read a directory header from r. +// It returns io.ErrUnexpectedEOF if it cannot read a complete header, +// and ErrFormat if it doesn't find a valid header signature. 
+func readDirectoryHeader(f *File, r io.Reader) error { + var buf [directoryHeaderLen]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + return err + } + b := readBuf(buf[:]) + if sig := b.uint32(); sig != directoryHeaderSignature { + return ErrFormat + } + f.CreatorVersion = b.uint16() + f.ReaderVersion = b.uint16() + f.Flags = b.uint16() + f.Method = b.uint16() + f.ModifiedTime = b.uint16() + f.ModifiedDate = b.uint16() + f.CRC32 = b.uint32() + f.CompressedSize = b.uint32() + f.UncompressedSize = b.uint32() + f.CompressedSize64 = uint64(f.CompressedSize) + f.UncompressedSize64 = uint64(f.UncompressedSize) + filenameLen := int(b.uint16()) + extraLen := int(b.uint16()) + commentLen := int(b.uint16()) + b = b[4:] // skipped start disk number and internal attributes (2x uint16) + f.ExternalAttrs = b.uint32() + f.headerOffset = int64(b.uint32()) + d := make([]byte, filenameLen+extraLen+commentLen) + if _, err := io.ReadFull(r, d); err != nil { + return err + } + f.Name = string(d[:filenameLen]) + f.Extra = d[filenameLen : filenameLen+extraLen] + f.Comment = string(d[filenameLen+extraLen:]) + + needUSize := f.UncompressedSize == ^uint32(0) + needCSize := f.CompressedSize == ^uint32(0) + needHeaderOffset := f.headerOffset == int64(^uint32(0)) + + if len(f.Extra) > 0 { + // Best effort to find what we need. + // Other zip authors might not even follow the basic format, + // and we'll just ignore the Extra content in that case. + b := readBuf(f.Extra) + for len(b) >= 4 { // need at least tag and size + tag := b.uint16() + size := b.uint16() + if int(size) > len(b) { + break + } + if tag == zip64ExtraId { + // update directory values from the zip64 extra block. + // They should only be consulted if the sizes read earlier + // are maxed out. + // See golang.org/issue/13367. + eb := readBuf(b[:size]) + + if needUSize { + needUSize = false + if len(eb) < 8 { + return ErrFormat + } + f.UncompressedSize64 = eb.uint64() + } + if needCSize { + needCSize = false + if len(eb) < 8 { + return ErrFormat + } + f.CompressedSize64 = eb.uint64() + } + if needHeaderOffset { + needHeaderOffset = false + if len(eb) < 8 { + return ErrFormat + } + f.headerOffset = int64(eb.uint64()) + } + break + } + b = b[size:] + } + } + + if needUSize || needCSize || needHeaderOffset { + return ErrFormat + } + + return nil +} + +func readDataDescriptor(r io.Reader, f *File) error { + var buf [dataDescriptorLen]byte + + // The spec says: "Although not originally assigned a + // signature, the value 0x08074b50 has commonly been adopted + // as a signature value for the data descriptor record. + // Implementers should be aware that ZIP files may be + // encountered with or without this signature marking data + // descriptors and should account for either case when reading + // ZIP files to ensure compatibility." + // + // dataDescriptorLen includes the size of the signature but + // first read just those 4 bytes to see if it exists. + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + off := 0 + maybeSig := readBuf(buf[:4]) + if maybeSig.uint32() != dataDescriptorSignature { + // No data descriptor signature. Keep these four + // bytes. + off += 4 + } + if _, err := io.ReadFull(r, buf[off:12]); err != nil { + return err + } + b := readBuf(buf[:12]) + if b.uint32() != f.CRC32 { + return ErrChecksum + } + + // The two sizes that follow here can be either 32 bits or 64 bits + // but the spec is not very clear on this and different + // interpretations has been made causing incompatibilities. 
We + // already have the sizes from the central directory so we can + // just ignore these. + + return nil +} + +func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) { + // look for directoryEndSignature in the last 1k, then in the last 65k + var buf []byte + var directoryEndOffset int64 + for i, bLen := range []int64{1024, 65 * 1024} { + if bLen > size { + bLen = size + } + buf = make([]byte, int(bLen)) + if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF { + return nil, err + } + if p := findSignatureInBlock(buf); p >= 0 { + buf = buf[p:] + directoryEndOffset = size - bLen + int64(p) + break + } + if i == 1 || bLen == size { + return nil, ErrFormat + } + } + + // read header into struct + b := readBuf(buf[4:]) // skip signature + d := &directoryEnd{ + diskNbr: uint32(b.uint16()), + dirDiskNbr: uint32(b.uint16()), + dirRecordsThisDisk: uint64(b.uint16()), + directoryRecords: uint64(b.uint16()), + directorySize: uint64(b.uint32()), + directoryOffset: uint64(b.uint32()), + commentLen: b.uint16(), + } + l := int(d.commentLen) + if l > len(b) { + return nil, errors.New("zip: invalid comment length") + } + d.comment = string(b[:l]) + + // These values mean that the file can be a zip64 file + if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff { + p, err := findDirectory64End(r, directoryEndOffset) + if err == nil && p >= 0 { + err = readDirectory64End(r, p, d) + } + if err != nil { + return nil, err + } + } + // Make sure directoryOffset points to somewhere in our file. + if o := int64(d.directoryOffset); o < 0 || o >= size { + return nil, ErrFormat + } + return d, nil +} + +// findDirectory64End tries to read the zip64 locator just before the +// directory end and returns the offset of the zip64 directory end if +// found. +func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) { + locOffset := directoryEndOffset - directory64LocLen + if locOffset < 0 { + return -1, nil // no need to look for a header outside the file + } + buf := make([]byte, directory64LocLen) + if _, err := r.ReadAt(buf, locOffset); err != nil { + return -1, err + } + b := readBuf(buf) + if sig := b.uint32(); sig != directory64LocSignature { + return -1, nil + } + if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory + return -1, nil // the file is not a valid zip64-file + } + p := b.uint64() // relative offset of the zip64 end of central directory record + if b.uint32() != 1 { // total number of disks + return -1, nil // the file is not a valid zip64-file + } + return int64(p), nil +} + +// readDirectory64End reads the zip64 directory end and updates the +// directory end with the zip64 directory end values. 
+func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) { + buf := make([]byte, directory64EndLen) + if _, err := r.ReadAt(buf, offset); err != nil { + return err + } + + b := readBuf(buf) + if sig := b.uint32(); sig != directory64EndSignature { + return ErrFormat + } + + b = b[12:] // skip dir size, version and version needed (uint64 + 2x uint16) + d.diskNbr = b.uint32() // number of this disk + d.dirDiskNbr = b.uint32() // number of the disk with the start of the central directory + d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk + d.directoryRecords = b.uint64() // total number of entries in the central directory + d.directorySize = b.uint64() // size of the central directory + d.directoryOffset = b.uint64() // offset of start of central directory with respect to the starting disk number + + return nil +} + +func findSignatureInBlock(b []byte) int { + for i := len(b) - directoryEndLen; i >= 0; i-- { + // defined from directoryEndSignature in struct.go + if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 { + // n is length of comment + n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8 + if n+directoryEndLen+i <= len(b) { + return i + } + } + } + return -1 +} + +type readBuf []byte + +func (b *readBuf) uint16() uint16 { + v := binary.LittleEndian.Uint16(*b) + *b = (*b)[2:] + return v +} + +func (b *readBuf) uint32() uint32 { + v := binary.LittleEndian.Uint32(*b) + *b = (*b)[4:] + return v +} + +func (b *readBuf) uint64() uint64 { + v := binary.LittleEndian.Uint64(*b) + *b = (*b)[8:] + return v +} diff --git a/vendor/github.com/klauspost/compress/zip/reader_test.go b/vendor/github.com/klauspost/compress/zip/reader_test.go new file mode 100644 index 00000000000..8f7e8bf555d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/reader_test.go @@ -0,0 +1,644 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + "time" +) + +type ZipTest struct { + Name string + Source func() (r io.ReaderAt, size int64) // if non-nil, used instead of testdata/ file + Comment string + File []ZipTestFile + Error error // the error that Opening this file should return +} + +type ZipTestFile struct { + Name string + Content []byte // if blank, will attempt to compare against File + ContentErr error + File string // name of file to compare to (relative to testdata/) + Mtime string // modified time in format "mm-dd-yy hh:mm:ss" + Mode os.FileMode +} + +// Caution: The Mtime values found for the test files should correspond to +// the values listed with unzip -l . However, the values +// listed by unzip appear to be off by some hours. When creating +// fresh test files and testing them, this issue is not present. +// The test files were created in Sydney, so there might be a time +// zone issue. The time zone information does have to be encoded +// somewhere, because otherwise unzip -l could not provide a different +// time from what the archive/zip package provides, but there appears +// to be no documentation about this. 
+ +var tests = []ZipTest{ + { + Name: "test.zip", + Comment: "This is a zipfile comment.", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte("This is a test text file.\n"), + Mtime: "09-05-10 12:12:02", + Mode: 0644, + }, + { + Name: "gophercolor16x16.png", + File: "gophercolor16x16.png", + Mtime: "09-05-10 15:52:58", + Mode: 0644, + }, + }, + }, + { + Name: "test-trailing-junk.zip", + Comment: "This is a zipfile comment.", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte("This is a test text file.\n"), + Mtime: "09-05-10 12:12:02", + Mode: 0644, + }, + { + Name: "gophercolor16x16.png", + File: "gophercolor16x16.png", + Mtime: "09-05-10 15:52:58", + Mode: 0644, + }, + }, + }, + { + Name: "r.zip", + Source: returnRecursiveZip, + File: []ZipTestFile{ + { + Name: "r/r.zip", + Content: rZipBytes(), + Mtime: "03-04-10 00:24:16", + Mode: 0666, + }, + }, + }, + { + Name: "symlink.zip", + File: []ZipTestFile{ + { + Name: "symlink", + Content: []byte("../target"), + Mode: 0777 | os.ModeSymlink, + }, + }, + }, + { + Name: "readme.zip", + }, + { + Name: "readme.notzip", + Error: ErrFormat, + }, + { + Name: "dd.zip", + File: []ZipTestFile{ + { + Name: "filename", + Content: []byte("This is a test textfile.\n"), + Mtime: "02-02-11 13:06:20", + Mode: 0666, + }, + }, + }, + { + // created in windows XP file manager. + Name: "winxp.zip", + File: crossPlatform, + }, + { + // created by Zip 3.0 under Linux + Name: "unix.zip", + File: crossPlatform, + }, + { + // created by Go, before we wrote the "optional" data + // descriptor signatures (which are required by OS X) + Name: "go-no-datadesc-sig.zip", + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mtime: "03-08-12 16:59:10", + Mode: 0644, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mtime: "03-08-12 16:59:12", + Mode: 0644, + }, + }, + }, + { + // created by Go, after we wrote the "optional" data + // descriptor signatures (which are required by OS X) + Name: "go-with-datadesc-sig.zip", + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mode: 0666, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mode: 0666, + }, + }, + }, + { + Name: "Bad-CRC32-in-data-descriptor", + Source: returnCorruptCRC32Zip, + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mode: 0666, + ContentErr: ErrChecksum, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mode: 0666, + }, + }, + }, + // Tests that we verify (and accept valid) crc32s on files + // with crc32s in their file header (not in data descriptors) + { + Name: "crc32-not-streamed.zip", + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mtime: "03-08-12 16:59:10", + Mode: 0644, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mtime: "03-08-12 16:59:12", + Mode: 0644, + }, + }, + }, + // Tests that we verify (and reject invalid) crc32s on files + // with crc32s in their file header (not in data descriptors) + { + Name: "crc32-not-streamed.zip", + Source: returnCorruptNotStreamedZip, + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mtime: "03-08-12 16:59:10", + Mode: 0644, + ContentErr: ErrChecksum, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mtime: "03-08-12 16:59:12", + Mode: 0644, + }, + }, + }, + { + Name: "zip64.zip", + File: []ZipTestFile{ + { + Name: "README", + Content: []byte("This small file is in ZIP64 format.\n"), + Mtime: "08-10-12 14:33:32", + Mode: 0644, + }, + }, + }, + // Another zip64 file with 
different Extras fields. (golang.org/issue/7069) + { + Name: "zip64-2.zip", + File: []ZipTestFile{ + { + Name: "README", + Content: []byte("This small file is in ZIP64 format.\n"), + Mtime: "08-10-12 14:33:32", + Mode: 0644, + }, + }, + }, +} + +var crossPlatform = []ZipTestFile{ + { + Name: "hello", + Content: []byte("world \r\n"), + Mode: 0666, + }, + { + Name: "dir/bar", + Content: []byte("foo \r\n"), + Mode: 0666, + }, + { + Name: "dir/empty/", + Content: []byte{}, + Mode: os.ModeDir | 0777, + }, + { + Name: "readonly", + Content: []byte("important \r\n"), + Mode: 0444, + }, +} + +func TestReader(t *testing.T) { + for _, zt := range tests { + readTestZip(t, zt) + } +} + +func readTestZip(t *testing.T, zt ZipTest) { + var z *Reader + var err error + if zt.Source != nil { + rat, size := zt.Source() + z, err = NewReader(rat, size) + } else { + var rc *ReadCloser + rc, err = OpenReader(filepath.Join("testdata", zt.Name)) + if err == nil { + defer rc.Close() + z = &rc.Reader + } + } + if err != zt.Error { + t.Errorf("%s: error=%v, want %v", zt.Name, err, zt.Error) + return + } + + // bail if file is not zip + if err == ErrFormat { + return + } + + // bail here if no Files expected to be tested + // (there may actually be files in the zip, but we don't care) + if zt.File == nil { + return + } + + if z.Comment != zt.Comment { + t.Errorf("%s: comment=%q, want %q", zt.Name, z.Comment, zt.Comment) + } + if len(z.File) != len(zt.File) { + t.Fatalf("%s: file count=%d, want %d", zt.Name, len(z.File), len(zt.File)) + } + + // test read of each file + for i, ft := range zt.File { + readTestFile(t, zt, ft, z.File[i]) + } + + // test simultaneous reads + n := 0 + done := make(chan bool) + for i := 0; i < 5; i++ { + for j, ft := range zt.File { + go func(j int, ft ZipTestFile) { + readTestFile(t, zt, ft, z.File[j]) + done <- true + }(j, ft) + n++ + } + } + for ; n > 0; n-- { + <-done + } +} + +func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { + if f.Name != ft.Name { + t.Errorf("%s: name=%q, want %q", zt.Name, f.Name, ft.Name) + } + + if ft.Mtime != "" { + mtime, err := time.Parse("01-02-06 15:04:05", ft.Mtime) + if err != nil { + t.Error(err) + return + } + if ft := f.ModTime(); !ft.Equal(mtime) { + t.Errorf("%s: %s: mtime=%s, want %s", zt.Name, f.Name, ft, mtime) + } + } + + testFileMode(t, zt.Name, f, ft.Mode) + + var b bytes.Buffer + r, err := f.Open() + if err != nil { + t.Errorf("%s: %v", zt.Name, err) + return + } + + _, err = io.Copy(&b, r) + if err != ft.ContentErr { + t.Errorf("%s: copying contents: %v (want %v)", zt.Name, err, ft.ContentErr) + } + if err != nil { + return + } + r.Close() + + size := uint64(f.UncompressedSize) + if size == uint32max { + size = f.UncompressedSize64 + } + if g := uint64(b.Len()); g != size { + t.Errorf("%v: read %v bytes but f.UncompressedSize == %v", f.Name, g, size) + } + + var c []byte + if ft.Content != nil { + c = ft.Content + } else if c, err = ioutil.ReadFile("testdata/" + ft.File); err != nil { + t.Error(err) + return + } + + if b.Len() != len(c) { + t.Errorf("%s: len=%d, want %d", f.Name, b.Len(), len(c)) + return + } + + for i, b := range b.Bytes() { + if b != c[i] { + t.Errorf("%s: content[%d]=%q want %q", f.Name, i, b, c[i]) + return + } + } +} + +func testFileMode(t *testing.T, zipName string, f *File, want os.FileMode) { + mode := f.Mode() + if want == 0 { + t.Errorf("%s: %s mode: got %v, want none", zipName, f.Name, mode) + } else if mode != want { + t.Errorf("%s: %s mode: want %v, got %v", zipName, f.Name, want, mode) + } +} 
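// A minimal round-trip sketch of the vendored reader and writer: it builds a
// one-entry archive in memory and reads it back through NewReader, the code
// path exercised by the tests above. This is illustrative only and assumes the
// import path github.com/klauspost/compress/zip used elsewhere in this change;
// the entry name and contents are arbitrary.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/zip"
)

func main() {
	// Write a single entry into an in-memory archive.
	buf := new(bytes.Buffer)
	zw := zip.NewWriter(buf)
	fw, err := zw.Create("hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fw.Write([]byte("hello, zip\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Read it back: NewReader needs an io.ReaderAt and the archive size.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range zr.File {
		rc, err := f.Open()
		if err != nil {
			log.Fatal(err)
		}
		data, err := ioutil.ReadAll(rc)
		rc.Close()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %q\n", f.Name, data)
	}
}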
+ +func TestInvalidFiles(t *testing.T) { + const size = 1024 * 70 // 70kb + b := make([]byte, size) + + // zeroes + _, err := NewReader(bytes.NewReader(b), size) + if err != ErrFormat { + t.Errorf("zeroes: error=%v, want %v", err, ErrFormat) + } + + // repeated directoryEndSignatures + sig := make([]byte, 4) + binary.LittleEndian.PutUint32(sig, directoryEndSignature) + for i := 0; i < size-4; i += 4 { + copy(b[i:i+4], sig) + } + _, err = NewReader(bytes.NewReader(b), size) + if err != ErrFormat { + t.Errorf("sigs: error=%v, want %v", err, ErrFormat) + } +} + +func messWith(fileName string, corrupter func(b []byte)) (r io.ReaderAt, size int64) { + data, err := ioutil.ReadFile(filepath.Join("testdata", fileName)) + if err != nil { + panic("Error reading " + fileName + ": " + err.Error()) + } + corrupter(data) + return bytes.NewReader(data), int64(len(data)) +} + +func returnCorruptCRC32Zip() (r io.ReaderAt, size int64) { + return messWith("go-with-datadesc-sig.zip", func(b []byte) { + // Corrupt one of the CRC32s in the data descriptor: + b[0x2d]++ + }) +} + +func returnCorruptNotStreamedZip() (r io.ReaderAt, size int64) { + return messWith("crc32-not-streamed.zip", func(b []byte) { + // Corrupt foo.txt's final crc32 byte, in both + // the file header and TOC. (0x7e -> 0x7f) + b[0x11]++ + b[0x9d]++ + + // TODO(bradfitz): add a new test that only corrupts + // one of these values, and verify that that's also an + // error. Currently, the reader code doesn't verify the + // fileheader and TOC's crc32 match if they're both + // non-zero and only the second line above, the TOC, + // is what matters. + }) +} + +// rZipBytes returns the bytes of a recursive zip file, without +// putting it on disk and triggering certain virus scanners. +func rZipBytes() []byte { + s := ` +0000000 50 4b 03 04 14 00 00 00 08 00 08 03 64 3c f9 f4 +0000010 89 64 48 01 00 00 b8 01 00 00 07 00 00 00 72 2f +0000020 72 2e 7a 69 70 00 25 00 da ff 50 4b 03 04 14 00 +0000030 00 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 +0000040 b8 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 +0000050 2f 00 d0 ff 00 25 00 da ff 50 4b 03 04 14 00 00 +0000060 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 b8 +0000070 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 2f +0000080 00 d0 ff c2 54 8e 57 39 00 05 00 fa ff c2 54 8e +0000090 57 39 00 05 00 fa ff 00 05 00 fa ff 00 14 00 eb +00000a0 ff c2 54 8e 57 39 00 05 00 fa ff 00 05 00 fa ff +00000b0 00 14 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 +00000c0 88 21 c4 00 00 14 00 eb ff 42 88 21 c4 00 00 14 +00000d0 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 88 21 +00000e0 c4 00 00 00 00 ff ff 00 00 00 ff ff 00 34 00 cb +00000f0 ff 42 88 21 c4 00 00 00 00 ff ff 00 00 00 ff ff +0000100 00 34 00 cb ff 42 e8 21 5e 0f 00 00 00 ff ff 0a +0000110 f0 66 64 12 61 c0 15 dc e8 a0 48 bf 48 af 2a b3 +0000120 20 c0 9b 95 0d c4 67 04 42 53 06 06 06 40 00 06 +0000130 00 f9 ff 6d 01 00 00 00 00 42 e8 21 5e 0f 00 00 +0000140 00 ff ff 0a f0 66 64 12 61 c0 15 dc e8 a0 48 bf +0000150 48 af 2a b3 20 c0 9b 95 0d c4 67 04 42 53 06 06 +0000160 06 40 00 06 00 f9 ff 6d 01 00 00 00 00 50 4b 01 +0000170 02 14 00 14 00 00 00 08 00 08 03 64 3c f9 f4 89 +0000180 64 48 01 00 00 b8 01 00 00 07 00 00 00 00 00 00 +0000190 00 00 00 00 00 00 00 00 00 00 00 72 2f 72 2e 7a +00001a0 69 70 50 4b 05 06 00 00 00 00 01 00 01 00 35 00 +00001b0 00 00 6d 01 00 00 00 00` + s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "") + s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "") + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } 
+ return b +} + +func returnRecursiveZip() (r io.ReaderAt, size int64) { + b := rZipBytes() + return bytes.NewReader(b), int64(len(b)) +} + +func TestIssue8186(t *testing.T) { + // Directory headers & data found in the TOC of a JAR file. + dirEnts := []string{ + "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\xaa\x1b\x06\xf0\x81\x02\x00\x00\x81\x02\x00\x00-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00res/drawable-xhdpi-v4/ic_actionbar_accept.png\xfe\xca\x00\x00\x00", + "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\x90K\x89\xc7t\n\x00\x00t\n\x00\x00\x0e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x02\x00\x00resources.arsc\x00\x00\x00", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xff$\x18\xed3\x03\x00\x00\xb4\b\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\r\x00\x00AndroidManifest.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\x14\xc5K\xab\x192\x02\x00\xc8\xcd\x04\x00\v\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x10\x00\x00classes.dex", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?E\x96\nD\xac\x01\x00\x00P\x03\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:C\x02\x00res/layout/actionbar_set_wallpaper.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?Ļ\x14\xe3\xd8\x01\x00\x00\xd8\x03\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:E\x02\x00res/layout/wallpaper_cropper.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?}\xc1\x15\x9eZ\x01\x00\x00!\x02\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`G\x02\x00META-INF/MANIFEST.MF", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xe6\x98Ьo\x01\x00\x00\x84\x02\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcH\x02\x00META-INF/CERT.SF", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xbfP\x96b\x86\x04\x00\x00\xb2\x06\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa9J\x02\x00META-INF/CERT.RSA", + } + for i, s := range dirEnts { + var f File + err := readDirectoryHeader(&f, strings.NewReader(s)) + if err != nil { + t.Errorf("error reading #%d: %v", i, err) + } + } +} + +// Verify we return ErrUnexpectedEOF when length is short. +func TestIssue10957(t *testing.T) { + data := []byte("PK\x03\x040000000PK\x01\x0200000" + + "0000000000000000000\x00" + + "\x00\x00\x00\x00\x00000000000000PK\x01" + + "\x020000000000000000000" + + "00000\v\x00\x00\x00\x00\x00000000000" + + "00000000000000PK\x01\x0200" + + "00000000000000000000" + + "00\v\x00\x00\x00\x00\x00000000000000" + + "00000000000PK\x01\x020000<" + + "0\x00\x0000000000000000\v\x00\v" + + "\x00\x00\x00\x00\x0000000000\x00\x00\x00\x00000" + + "00000000PK\x01\x0200000000" + + "0000000000000000\v\x00\x00\x00" + + "\x00\x0000PK\x05\x06000000\x05\x000000" + + "\v\x00\x00\x00\x00\x00") + z, err := NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + t.Fatal(err) + } + for i, f := range z.File { + r, err := f.Open() + if err != nil { + continue + } + if f.UncompressedSize64 < 1e6 { + n, err := io.Copy(ioutil.Discard, r) + if i == 3 && err != io.ErrUnexpectedEOF { + t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err) + } + if err == nil && uint64(n) != f.UncompressedSize64 { + t.Errorf("file %d: bad size: copied=%d; want=%d", i, n, f.UncompressedSize64) + } + } + r.Close() + } +} + +// Verify the number of files is sane. 
+func TestIssue10956(t *testing.T) { + data := []byte("PK\x06\x06PK\x06\a0000\x00\x00\x00\x00\x00\x00\x00\x00" + + "0000PK\x05\x06000000000000" + + "0000\v\x00000\x00\x00\x00\x00\x00\x00\x000") + _, err := NewReader(bytes.NewReader(data), int64(len(data))) + const want = "TOC declares impossible 3472328296227680304 files in 57 byte" + if err == nil && !strings.Contains(err.Error(), want) { + t.Errorf("error = %v; want %q", err, want) + } +} + +// Verify we return ErrUnexpectedEOF when reading truncated data descriptor. +func TestIssue11146(t *testing.T) { + data := []byte("PK\x03\x040000000000000000" + + "000000\x01\x00\x00\x000\x01\x00\x00\xff\xff0000" + + "0000000000000000PK\x01\x02" + + "0000\b0\b\x00000000000000" + + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000PK\x05\x06\x00\x00" + + "\x00\x0000\x01\x0000008\x00\x00\x00\x00\x00") + z, err := NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + t.Fatal(err) + } + r, err := z.File[0].Open() + if err != nil { + t.Fatal(err) + } + _, err = ioutil.ReadAll(r) + if err != io.ErrUnexpectedEOF { + t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err) + } + r.Close() +} + +// Verify we do not treat non-zip64 archives as zip64 +func TestIssue12449(t *testing.T) { + data := []byte{ + 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00, + 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64, + 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05, + 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, + 0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a, + 0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0, + 0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, + 0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00, + 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64, + 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05, + 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, + 0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b, + 0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06, + 0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61, + 0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6, + 0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, + } + // Read in the archive. + _, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data))) + if err != nil { + t.Errorf("Error reading the archive: %v", err) + } +} diff --git a/vendor/github.com/klauspost/compress/zip/register.go b/vendor/github.com/klauspost/compress/zip/register.go new file mode 100644 index 00000000000..90b582d0ecc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/register.go @@ -0,0 +1,111 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "errors" + "io" + "io/ioutil" + "sync" + + "github.com/klauspost/compress/flate" +) + +// A Compressor returns a compressing writer, writing to the +// provided writer. On Close, any pending data should be flushed. +type Compressor func(io.Writer) (io.WriteCloser, error) + +// Decompressor is a function that wraps a Reader with a decompressing Reader. 
+// The decompressed ReadCloser is returned to callers who open files from +// within the archive. These callers are responsible for closing this reader +// when they're finished reading. +type Decompressor func(io.Reader) io.ReadCloser + +var flateWriterPool sync.Pool + +func newFlateWriter(w io.Writer) io.WriteCloser { + fw, ok := flateWriterPool.Get().(*flate.Writer) + if ok { + fw.Reset(w) + } else { + fw, _ = flate.NewWriter(w, 5) + } + return &pooledFlateWriter{fw: fw} +} + +type pooledFlateWriter struct { + mu sync.Mutex // guards Close and Write + fw *flate.Writer +} + +func (w *pooledFlateWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.fw == nil { + return 0, errors.New("Write after Close") + } + return w.fw.Write(p) +} + +func (w *pooledFlateWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.fw != nil { + err = w.fw.Close() + flateWriterPool.Put(w.fw) + w.fw = nil + } + return err +} + +var ( + mu sync.RWMutex // guards compressor and decompressor maps + + compressors = map[uint16]Compressor{ + Store: func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }, + Deflate: func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }, + } + + decompressors = map[uint16]Decompressor{ + Store: ioutil.NopCloser, + Deflate: flate.NewReader, + } +) + +// RegisterDecompressor allows custom decompressors for a specified method ID. +func RegisterDecompressor(method uint16, d Decompressor) { + mu.Lock() + defer mu.Unlock() + + if _, ok := decompressors[method]; ok { + panic("decompressor already registered") + } + decompressors[method] = d +} + +// RegisterCompressor registers custom compressors for a specified method ID. +// The common methods Store and Deflate are built in. +func RegisterCompressor(method uint16, comp Compressor) { + mu.Lock() + defer mu.Unlock() + + if _, ok := compressors[method]; ok { + panic("compressor already registered") + } + compressors[method] = comp +} + +func compressor(method uint16) Compressor { + mu.RLock() + defer mu.RUnlock() + return compressors[method] +} + +func decompressor(method uint16) Decompressor { + mu.RLock() + defer mu.RUnlock() + return decompressors[method] +} diff --git a/vendor/github.com/klauspost/compress/zip/struct.go b/vendor/github.com/klauspost/compress/zip/struct.go new file mode 100644 index 00000000000..5ee4f88f803 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/struct.go @@ -0,0 +1,313 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package zip provides support for reading and writing ZIP archives. + +See: http://www.pkware.com/documents/casestudies/APPNOTE.TXT + +This package does not support disk spanning. + +A note about ZIP64: + +To be backwards compatible the FileHeader has both 32 and 64 bit Size +fields. The 64 bit fields will always contain the correct value and +for normal archives both fields will be the same. For files requiring +the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit +fields must be used instead. +*/ +package zip + +import ( + "os" + "path" + "time" +) + +// Compression methods. 
+const ( + Store uint16 = 0 + Deflate uint16 = 8 +) + +const ( + fileHeaderSignature = 0x04034b50 + directoryHeaderSignature = 0x02014b50 + directoryEndSignature = 0x06054b50 + directory64LocSignature = 0x07064b50 + directory64EndSignature = 0x06064b50 + dataDescriptorSignature = 0x08074b50 // de-facto standard; required by OS X Finder + fileHeaderLen = 30 // + filename + extra + directoryHeaderLen = 46 // + filename + extra + comment + directoryEndLen = 22 // + comment + dataDescriptorLen = 16 // four uint32: descriptor signature, crc32, compressed size, size + dataDescriptor64Len = 24 // descriptor with 8 byte sizes + directory64LocLen = 20 // + directory64EndLen = 56 // + extra + + // Constants for the first byte in CreatorVersion + creatorFAT = 0 + creatorUnix = 3 + creatorNTFS = 11 + creatorVFAT = 14 + creatorMacOSX = 19 + + // version numbers + zipVersion20 = 20 // 2.0 + zipVersion45 = 45 // 4.5 (reads and writes zip64 archives) + + // limits for non zip64 files + uint16max = (1 << 16) - 1 + uint32max = (1 << 32) - 1 + + // extra header id's + zip64ExtraId = 0x0001 // zip64 Extended Information Extra Field +) + +// FileHeader describes a file within a zip file. +// See the zip spec for details. +type FileHeader struct { + // Name is the name of the file. + // It must be a relative path: it must not start with a drive + // letter (e.g. C:) or leading slash, and only forward slashes + // are allowed. + Name string + + CreatorVersion uint16 + ReaderVersion uint16 + Flags uint16 + Method uint16 + ModifiedTime uint16 // MS-DOS time + ModifiedDate uint16 // MS-DOS date + CRC32 uint32 + CompressedSize uint32 // Deprecated: Use CompressedSize64 instead. + UncompressedSize uint32 // Deprecated: Use UncompressedSize64 instead. + CompressedSize64 uint64 + UncompressedSize64 uint64 + Extra []byte + ExternalAttrs uint32 // Meaning depends on CreatorVersion + Comment string +} + +// FileInfo returns an os.FileInfo for the FileHeader. +func (h *FileHeader) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + fh *FileHeader +} + +func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) } +func (fi headerFileInfo) Size() int64 { + if fi.fh.UncompressedSize64 > 0 { + return int64(fi.fh.UncompressedSize64) + } + return int64(fi.fh.UncompressedSize) +} +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.fh.ModTime() } +func (fi headerFileInfo) Mode() os.FileMode { return fi.fh.Mode() } +func (fi headerFileInfo) Sys() interface{} { return fi.fh } + +// FileInfoHeader creates a partially-populated FileHeader from an +// os.FileInfo. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. 
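+// A typical caller therefore sets the Name field to the file's slash-separated
+// path relative to the archive root after calling FileInfoHeader.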
+func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) { + size := fi.Size() + fh := &FileHeader{ + Name: fi.Name(), + UncompressedSize64: uint64(size), + } + fh.SetModTime(fi.ModTime()) + fh.SetMode(fi.Mode()) + if fh.UncompressedSize64 > uint32max { + fh.UncompressedSize = uint32max + } else { + fh.UncompressedSize = uint32(fh.UncompressedSize64) + } + return fh, nil +} + +type directoryEnd struct { + diskNbr uint32 // unused + dirDiskNbr uint32 // unused + dirRecordsThisDisk uint64 // unused + directoryRecords uint64 + directorySize uint64 + directoryOffset uint64 // relative to file + commentLen uint16 + comment string +} + +// msDosTimeToTime converts an MS-DOS date and time into a time.Time. +// The resolution is 2s. +// See: http://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx +func msDosTimeToTime(dosDate, dosTime uint16) time.Time { + return time.Date( + // date bits 0-4: day of month; 5-8: month; 9-15: years since 1980 + int(dosDate>>9+1980), + time.Month(dosDate>>5&0xf), + int(dosDate&0x1f), + + // time bits 0-4: second/2; 5-10: minute; 11-15: hour + int(dosTime>>11), + int(dosTime>>5&0x3f), + int(dosTime&0x1f*2), + 0, // nanoseconds + + time.UTC, + ) +} + +// timeToMsDosTime converts a time.Time to an MS-DOS date and time. +// The resolution is 2s. +// See: http://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx +func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { + t = t.In(time.UTC) + fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9) + fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11) + return +} + +// ModTime returns the modification time in UTC. +// The resolution is 2s. +func (h *FileHeader) ModTime() time.Time { + return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime) +} + +// SetModTime sets the ModifiedTime and ModifiedDate fields to the given time in UTC. +// The resolution is 2s. +func (h *FileHeader) SetModTime(t time.Time) { + h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t) +} + +const ( + // Unix constants. The specification doesn't mention them, + // but these seem to be the values agreed on by tools. + s_IFMT = 0xf000 + s_IFSOCK = 0xc000 + s_IFLNK = 0xa000 + s_IFREG = 0x8000 + s_IFBLK = 0x6000 + s_IFDIR = 0x4000 + s_IFCHR = 0x2000 + s_IFIFO = 0x1000 + s_ISUID = 0x800 + s_ISGID = 0x400 + s_ISVTX = 0x200 + + msdosDir = 0x10 + msdosReadOnly = 0x01 +) + +// Mode returns the permission and mode bits for the FileHeader. +func (h *FileHeader) Mode() (mode os.FileMode) { + switch h.CreatorVersion >> 8 { + case creatorUnix, creatorMacOSX: + mode = unixModeToFileMode(h.ExternalAttrs >> 16) + case creatorNTFS, creatorVFAT, creatorFAT: + mode = msdosModeToFileMode(h.ExternalAttrs) + } + if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' { + mode |= os.ModeDir + } + return mode +} + +// SetMode changes the permission and mode bits for the FileHeader. +func (h *FileHeader) SetMode(mode os.FileMode) { + h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8 + h.ExternalAttrs = fileModeToUnixMode(mode) << 16 + + // set MSDOS attributes too, as the original zip does. 
+ if mode&os.ModeDir != 0 { + h.ExternalAttrs |= msdosDir + } + if mode&0200 == 0 { + h.ExternalAttrs |= msdosReadOnly + } +} + +// isZip64 reports whether the file size exceeds the 32 bit limit +func (fh *FileHeader) isZip64() bool { + return fh.CompressedSize64 >= uint32max || fh.UncompressedSize64 >= uint32max +} + +func msdosModeToFileMode(m uint32) (mode os.FileMode) { + if m&msdosDir != 0 { + mode = os.ModeDir | 0777 + } else { + mode = 0666 + } + if m&msdosReadOnly != 0 { + mode &^= 0222 + } + return mode +} + +func fileModeToUnixMode(mode os.FileMode) uint32 { + var m uint32 + switch mode & os.ModeType { + default: + m = s_IFREG + case os.ModeDir: + m = s_IFDIR + case os.ModeSymlink: + m = s_IFLNK + case os.ModeNamedPipe: + m = s_IFIFO + case os.ModeSocket: + m = s_IFSOCK + case os.ModeDevice: + if mode&os.ModeCharDevice != 0 { + m = s_IFCHR + } else { + m = s_IFBLK + } + } + if mode&os.ModeSetuid != 0 { + m |= s_ISUID + } + if mode&os.ModeSetgid != 0 { + m |= s_ISGID + } + if mode&os.ModeSticky != 0 { + m |= s_ISVTX + } + return m | uint32(mode&0777) +} + +func unixModeToFileMode(m uint32) os.FileMode { + mode := os.FileMode(m & 0777) + switch m & s_IFMT { + case s_IFBLK: + mode |= os.ModeDevice + case s_IFCHR: + mode |= os.ModeDevice | os.ModeCharDevice + case s_IFDIR: + mode |= os.ModeDir + case s_IFIFO: + mode |= os.ModeNamedPipe + case s_IFLNK: + mode |= os.ModeSymlink + case s_IFREG: + // nothing to do + case s_IFSOCK: + mode |= os.ModeSocket + } + if m&s_ISGID != 0 { + mode |= os.ModeSetgid + } + if m&s_ISUID != 0 { + mode |= os.ModeSetuid + } + if m&s_ISVTX != 0 { + mode |= os.ModeSticky + } + return mode +} diff --git a/vendor/github.com/klauspost/compress/zip/testdata/crc32-not-streamed.zip b/vendor/github.com/klauspost/compress/zip/testdata/crc32-not-streamed.zip new file mode 100644 index 00000000000..f268d88732f Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/crc32-not-streamed.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/dd.zip b/vendor/github.com/klauspost/compress/zip/testdata/dd.zip new file mode 100644 index 00000000000..e53378b0b0e Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/dd.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/go-no-datadesc-sig.zip b/vendor/github.com/klauspost/compress/zip/testdata/go-no-datadesc-sig.zip new file mode 100644 index 00000000000..c3d593f44f9 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/go-no-datadesc-sig.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/go-with-datadesc-sig.zip b/vendor/github.com/klauspost/compress/zip/testdata/go-with-datadesc-sig.zip new file mode 100644 index 00000000000..bcfe121bb63 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/go-with-datadesc-sig.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/gophercolor16x16.png b/vendor/github.com/klauspost/compress/zip/testdata/gophercolor16x16.png new file mode 100644 index 00000000000..48854ff3b72 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/gophercolor16x16.png differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/readme.notzip b/vendor/github.com/klauspost/compress/zip/testdata/readme.notzip new file mode 100644 index 00000000000..06668c4c1c0 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/readme.notzip differ diff --git 
a/vendor/github.com/klauspost/compress/zip/testdata/readme.zip b/vendor/github.com/klauspost/compress/zip/testdata/readme.zip new file mode 100644 index 00000000000..db3bb900e4e Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/readme.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/symlink.zip b/vendor/github.com/klauspost/compress/zip/testdata/symlink.zip new file mode 100644 index 00000000000..af846938cde Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/symlink.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/test-trailing-junk.zip b/vendor/github.com/klauspost/compress/zip/testdata/test-trailing-junk.zip new file mode 100644 index 00000000000..42281b4e305 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/test-trailing-junk.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/test.zip b/vendor/github.com/klauspost/compress/zip/testdata/test.zip new file mode 100644 index 00000000000..03890c05d4c Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/test.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/unix.zip b/vendor/github.com/klauspost/compress/zip/testdata/unix.zip new file mode 100644 index 00000000000..ce1a981b280 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/unix.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/winxp.zip b/vendor/github.com/klauspost/compress/zip/testdata/winxp.zip new file mode 100644 index 00000000000..3919322f0c5 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/winxp.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/zip64-2.zip b/vendor/github.com/klauspost/compress/zip/testdata/zip64-2.zip new file mode 100644 index 00000000000..f844e35373e Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/zip64-2.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/zip64.zip b/vendor/github.com/klauspost/compress/zip/testdata/zip64.zip new file mode 100644 index 00000000000..a2ee1fa33dc Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/zip64.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/writer.go b/vendor/github.com/klauspost/compress/zip/writer.go new file mode 100644 index 00000000000..5ce66e6be5e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/writer.go @@ -0,0 +1,392 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bufio" + "encoding/binary" + "errors" + "hash" + "hash/crc32" + "io" +) + +// TODO(adg): support zip file comments + +// Writer implements a zip file writer. +type Writer struct { + cw *countWriter + dir []*header + last *fileWriter + closed bool + compressors map[uint16]Compressor +} + +type header struct { + *FileHeader + offset uint64 +} + +// NewWriter returns a new Writer writing a zip file to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}} +} + +// SetOffset sets the offset of the beginning of the zip data within the +// underlying writer. It should be used when the zip data is appended to an +// existing file, such as a binary executable. +// It must be called before any data is written. 
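+// For example, when zip data is appended to an executable, pass the length of
+// the executable prefix so that offsets recorded in the central directory are
+// measured from the start of the combined file.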
+func (w *Writer) SetOffset(n int64) { + if w.cw.count != 0 { + panic("zip: SetOffset called after data was written") + } + w.cw.count = n +} + +// Flush flushes any buffered data to the underlying writer. +// Calling Flush is not normally necessary; calling Close is sufficient. +func (w *Writer) Flush() error { + return w.cw.w.(*bufio.Writer).Flush() +} + +// Close finishes writing the zip file by writing the central directory. +// It does not (and can not) close the underlying writer. +func (w *Writer) Close() error { + if w.last != nil && !w.last.closed { + if err := w.last.close(); err != nil { + return err + } + w.last = nil + } + if w.closed { + return errors.New("zip: writer closed twice") + } + w.closed = true + + // write central directory + start := w.cw.count + for _, h := range w.dir { + var buf [directoryHeaderLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(directoryHeaderSignature)) + b.uint16(h.CreatorVersion) + b.uint16(h.ReaderVersion) + b.uint16(h.Flags) + b.uint16(h.Method) + b.uint16(h.ModifiedTime) + b.uint16(h.ModifiedDate) + b.uint32(h.CRC32) + if h.isZip64() || h.offset >= uint32max { + // the file needs a zip64 header. store maxint in both + // 32 bit size fields (and offset later) to signal that the + // zip64 extra header should be used. + b.uint32(uint32max) // compressed size + b.uint32(uint32max) // uncompressed size + + // append a zip64 extra block to Extra + var buf [28]byte // 2x uint16 + 3x uint64 + eb := writeBuf(buf[:]) + eb.uint16(zip64ExtraId) + eb.uint16(24) // size = 3x uint64 + eb.uint64(h.UncompressedSize64) + eb.uint64(h.CompressedSize64) + eb.uint64(h.offset) + h.Extra = append(h.Extra, buf[:]...) + } else { + b.uint32(h.CompressedSize) + b.uint32(h.UncompressedSize) + } + b.uint16(uint16(len(h.Name))) + b.uint16(uint16(len(h.Extra))) + b.uint16(uint16(len(h.Comment))) + b = b[4:] // skip disk number start and internal file attr (2x uint16) + b.uint32(h.ExternalAttrs) + if h.offset > uint32max { + b.uint32(uint32max) + } else { + b.uint32(uint32(h.offset)) + } + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + if _, err := io.WriteString(w.cw, h.Name); err != nil { + return err + } + if _, err := w.cw.Write(h.Extra); err != nil { + return err + } + if _, err := io.WriteString(w.cw, h.Comment); err != nil { + return err + } + } + end := w.cw.count + + records := uint64(len(w.dir)) + size := uint64(end - start) + offset := uint64(start) + + if records > uint16max || size > uint32max || offset > uint32max { + var buf [directory64EndLen + directory64LocLen]byte + b := writeBuf(buf[:]) + + // zip64 end of central directory record + b.uint32(directory64EndSignature) + b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64) + b.uint16(zipVersion45) // version made by + b.uint16(zipVersion45) // version needed to extract + b.uint32(0) // number of this disk + b.uint32(0) // number of the disk with the start of the central directory + b.uint64(records) // total number of entries in the central directory on this disk + b.uint64(records) // total number of entries in the central directory + b.uint64(size) // size of the central directory + b.uint64(offset) // offset of start of central directory with respect to the starting disk number + + // zip64 end of central directory locator + b.uint32(directory64LocSignature) + b.uint32(0) // number of the disk with the start of the zip64 end of central directory + b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record + 
b.uint32(1) // total number of disks + + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + + // store max values in the regular end record to signal that + // that the zip64 values should be used instead + records = uint16max + size = uint32max + offset = uint32max + } + + // write end record + var buf [directoryEndLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(directoryEndSignature)) + b = b[4:] // skip over disk number and first disk number (2x uint16) + b.uint16(uint16(records)) // number of entries this disk + b.uint16(uint16(records)) // number of entries total + b.uint32(uint32(size)) // size of directory + b.uint32(uint32(offset)) // start of directory + // skipped size of comment (always zero) + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + + return w.cw.w.(*bufio.Writer).Flush() +} + +// Create adds a file to the zip file using the provided name. +// It returns a Writer to which the file contents should be written. +// The name must be a relative path: it must not start with a drive +// letter (e.g. C:) or leading slash, and only forward slashes are +// allowed. +// The file's contents must be written to the io.Writer before the next +// call to Create, CreateHeader, or Close. +func (w *Writer) Create(name string) (io.Writer, error) { + header := &FileHeader{ + Name: name, + Method: Deflate, + } + return w.CreateHeader(header) +} + +// CreateHeader adds a file to the zip file using the provided FileHeader +// for the file metadata. +// It returns a Writer to which the file contents should be written. +// +// The file's contents must be written to the io.Writer before the next +// call to Create, CreateHeader, or Close. The provided FileHeader fh +// must not be modified after a call to CreateHeader. +func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { + if w.last != nil && !w.last.closed { + if err := w.last.close(); err != nil { + return nil, err + } + } + if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh { + // See https://golang.org/issue/11144 confusion. 
+ return nil, errors.New("archive/zip: invalid duplicate FileHeader") + } + + fh.Flags |= 0x8 // we will write a data descriptor + + fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte + fh.ReaderVersion = zipVersion20 + + fw := &fileWriter{ + zipw: w.cw, + compCount: &countWriter{w: w.cw}, + crc32: crc32.NewIEEE(), + } + comp := w.compressor(fh.Method) + if comp == nil { + return nil, ErrAlgorithm + } + var err error + fw.comp, err = comp(fw.compCount) + if err != nil { + return nil, err + } + fw.rawCount = &countWriter{w: fw.comp} + + h := &header{ + FileHeader: fh, + offset: uint64(w.cw.count), + } + w.dir = append(w.dir, h) + fw.header = h + + if err := writeHeader(w.cw, fh); err != nil { + return nil, err + } + + w.last = fw + return fw, nil +} + +func writeHeader(w io.Writer, h *FileHeader) error { + var buf [fileHeaderLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(fileHeaderSignature)) + b.uint16(h.ReaderVersion) + b.uint16(h.Flags) + b.uint16(h.Method) + b.uint16(h.ModifiedTime) + b.uint16(h.ModifiedDate) + b.uint32(0) // since we are writing a data descriptor crc32, + b.uint32(0) // compressed size, + b.uint32(0) // and uncompressed size should be zero + b.uint16(uint16(len(h.Name))) + b.uint16(uint16(len(h.Extra))) + if _, err := w.Write(buf[:]); err != nil { + return err + } + if _, err := io.WriteString(w, h.Name); err != nil { + return err + } + _, err := w.Write(h.Extra) + return err +} + +// RegisterCompressor registers or overrides a custom compressor for a specific +// method ID. If a compressor for a given method is not found, Writer will +// default to looking up the compressor at the package level. +func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { + if w.compressors == nil { + w.compressors = make(map[uint16]Compressor) + } + w.compressors[method] = comp +} + +func (w *Writer) compressor(method uint16) Compressor { + comp := w.compressors[method] + if comp == nil { + comp = compressor(method) + } + return comp +} + +type fileWriter struct { + *header + zipw io.Writer + rawCount *countWriter + comp io.WriteCloser + compCount *countWriter + crc32 hash.Hash32 + closed bool +} + +func (w *fileWriter) Write(p []byte) (int, error) { + if w.closed { + return 0, errors.New("zip: write to closed file") + } + w.crc32.Write(p) + return w.rawCount.Write(p) +} + +func (w *fileWriter) close() error { + if w.closed { + return errors.New("zip: file closed twice") + } + w.closed = true + if err := w.comp.Close(); err != nil { + return err + } + + // update FileHeader + fh := w.header.FileHeader + fh.CRC32 = w.crc32.Sum32() + fh.CompressedSize64 = uint64(w.compCount.count) + fh.UncompressedSize64 = uint64(w.rawCount.count) + + if fh.isZip64() { + fh.CompressedSize = uint32max + fh.UncompressedSize = uint32max + fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions + } else { + fh.CompressedSize = uint32(fh.CompressedSize64) + fh.UncompressedSize = uint32(fh.UncompressedSize64) + } + + // Write data descriptor. This is more complicated than one would + // think, see e.g. comments in zipfile.c:putextended() and + // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588. + // The approach here is to write 8 byte sizes if needed without + // adding a zip64 extra in the local header (too late anyway). 
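+	// Concretely: zip64 entries get a dataDescriptor64Len (24 byte) descriptor
+	// with two uint64 sizes, while all other entries get a dataDescriptorLen
+	// (16 byte) descriptor with two uint32 sizes; both begin with the de-facto
+	// descriptor signature.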
+ var buf []byte + if fh.isZip64() { + buf = make([]byte, dataDescriptor64Len) + } else { + buf = make([]byte, dataDescriptorLen) + } + b := writeBuf(buf) + b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X + b.uint32(fh.CRC32) + if fh.isZip64() { + b.uint64(fh.CompressedSize64) + b.uint64(fh.UncompressedSize64) + } else { + b.uint32(fh.CompressedSize) + b.uint32(fh.UncompressedSize) + } + _, err := w.zipw.Write(buf) + return err +} + +type countWriter struct { + w io.Writer + count int64 +} + +func (w *countWriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.count += int64(n) + return n, err +} + +type nopCloser struct { + io.Writer +} + +func (w nopCloser) Close() error { + return nil +} + +type writeBuf []byte + +func (b *writeBuf) uint16(v uint16) { + binary.LittleEndian.PutUint16(*b, v) + *b = (*b)[2:] +} + +func (b *writeBuf) uint32(v uint32) { + binary.LittleEndian.PutUint32(*b, v) + *b = (*b)[4:] +} + +func (b *writeBuf) uint64(v uint64) { + binary.LittleEndian.PutUint64(*b, v) + *b = (*b)[8:] +} diff --git a/vendor/github.com/klauspost/compress/zip/writer_test.go b/vendor/github.com/klauspost/compress/zip/writer_test.go new file mode 100644 index 00000000000..01b63f2358d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/writer_test.go @@ -0,0 +1,199 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "os" + "testing" +) + +// TODO(adg): a more sophisticated test suite + +type WriteTest struct { + Name string + Data []byte + Method uint16 + Mode os.FileMode +} + +var writeTests = []WriteTest{ + { + Name: "foo", + Data: []byte("Rabbits, guinea pigs, gophers, marsupial rats, and quolls."), + Method: Store, + Mode: 0666, + }, + { + Name: "bar", + Data: nil, // large data set in the test + Method: Deflate, + Mode: 0644, + }, + { + Name: "setuid", + Data: []byte("setuid file"), + Method: Deflate, + Mode: 0755 | os.ModeSetuid, + }, + { + Name: "setgid", + Data: []byte("setgid file"), + Method: Deflate, + Mode: 0755 | os.ModeSetgid, + }, + { + Name: "symlink", + Data: []byte("../link/target"), + Method: Deflate, + Mode: 0755 | os.ModeSymlink, + }, +} + +func TestWriter(t *testing.T) { + largeData := make([]byte, 1<<17) + for i := range largeData { + largeData[i] = byte(rand.Int()) + } + writeTests[1].Data = largeData + defer func() { + writeTests[1].Data = nil + }() + + // write a zip file + buf := new(bytes.Buffer) + w := NewWriter(buf) + + for _, wt := range writeTests { + testCreate(t, w, &wt) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read it back + r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != nil { + t.Fatal(err) + } + for i, wt := range writeTests { + testReadFile(t, r.File[i], &wt) + } +} + +func TestWriterOffset(t *testing.T) { + largeData := make([]byte, 1<<17) + for i := range largeData { + largeData[i] = byte(rand.Int()) + } + writeTests[1].Data = largeData + defer func() { + writeTests[1].Data = nil + }() + + // write a zip file + buf := new(bytes.Buffer) + existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3} + n, _ := buf.Write(existingData) + w := NewWriter(buf) + w.SetOffset(int64(n)) + + for _, wt := range writeTests { + testCreate(t, w, &wt) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read it back + r, err := NewReader(bytes.NewReader(buf.Bytes()), 
int64(buf.Len())) + if err != nil { + t.Fatal(err) + } + for i, wt := range writeTests { + testReadFile(t, r.File[i], &wt) + } +} + +func TestWriterFlush(t *testing.T) { + var buf bytes.Buffer + w := NewWriter(struct{ io.Writer }{&buf}) + _, err := w.Create("foo") + if err != nil { + t.Fatal(err) + } + if buf.Len() > 0 { + t.Fatalf("Unexpected %d bytes already in buffer", buf.Len()) + } + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if buf.Len() == 0 { + t.Fatal("No bytes written after Flush") + } +} + +func testCreate(t *testing.T, w *Writer, wt *WriteTest) { + header := &FileHeader{ + Name: wt.Name, + Method: wt.Method, + } + if wt.Mode != 0 { + header.SetMode(wt.Mode) + } + f, err := w.CreateHeader(header) + if err != nil { + t.Fatal(err) + } + _, err = f.Write(wt.Data) + if err != nil { + t.Fatal(err) + } +} + +func testReadFile(t *testing.T, f *File, wt *WriteTest) { + if f.Name != wt.Name { + t.Fatalf("File name: got %q, want %q", f.Name, wt.Name) + } + testFileMode(t, wt.Name, f, wt.Mode) + rc, err := f.Open() + if err != nil { + t.Fatal("opening:", err) + } + b, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal("reading:", err) + } + err = rc.Close() + if err != nil { + t.Fatal("closing:", err) + } + if !bytes.Equal(b, wt.Data) { + t.Errorf("File contents %q, want %q", b, wt.Data) + } +} + +func BenchmarkCompressedZipGarbage(b *testing.B) { + b.ReportAllocs() + var buf bytes.Buffer + bigBuf := bytes.Repeat([]byte("a"), 1<<20) + for i := 0; i < b.N; i++ { + buf.Reset() + zw := NewWriter(&buf) + for j := 0; j < 3; j++ { + w, _ := zw.CreateHeader(&FileHeader{ + Name: "foo", + Method: Deflate, + }) + w.Write(bigBuf) + } + zw.Close() + } +} diff --git a/vendor/github.com/klauspost/compress/zip/zip_test.go b/vendor/github.com/klauspost/compress/zip/zip_test.go new file mode 100644 index 00000000000..681e42c0ee5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/zip_test.go @@ -0,0 +1,467 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests that involve both reading and writing. 
+ +package zip + +import ( + "bytes" + "fmt" + "hash" + "io" + "io/ioutil" + "sort" + "strings" + "testing" + "time" +) + +func TestOver65kFiles(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + const nFiles = (1 << 16) + 42 + for i := 0; i < nFiles; i++ { + _, err := w.CreateHeader(&FileHeader{ + Name: fmt.Sprintf("%d.dat", i), + Method: Store, // avoid Issue 6136 and Issue 6138 + }) + if err != nil { + t.Fatalf("creating file %d: %v", i, err) + } + } + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + s := buf.String() + zr, err := NewReader(strings.NewReader(s), int64(len(s))) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + if got := len(zr.File); got != nFiles { + t.Fatalf("File contains %d files, want %d", got, nFiles) + } + for i := 0; i < nFiles; i++ { + want := fmt.Sprintf("%d.dat", i) + if zr.File[i].Name != want { + t.Fatalf("File(%d) = %q, want %q", i, zr.File[i].Name, want) + } + } +} + +func TestModTime(t *testing.T) { + var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC) + fh := new(FileHeader) + fh.SetModTime(testTime) + outTime := fh.ModTime() + if !outTime.Equal(testTime) { + t.Errorf("times don't match: got %s, want %s", outTime, testTime) + } +} + +func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) { + fi := fh.FileInfo() + fh2, err := FileInfoHeader(fi) + if err != nil { + t.Fatal(err) + } + if got, want := fh2.Name, fh.Name; got != want { + t.Errorf("Name: got %s, want %s\n", got, want) + } + if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want { + t.Errorf("UncompressedSize: got %d, want %d\n", got, want) + } + if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want { + t.Errorf("UncompressedSize64: got %d, want %d\n", got, want) + } + if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want { + t.Errorf("ModifiedTime: got %d, want %d\n", got, want) + } + if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want { + t.Errorf("ModifiedDate: got %d, want %d\n", got, want) + } + + if sysfh, ok := fi.Sys().(*FileHeader); !ok && sysfh != fh { + t.Errorf("Sys didn't return original *FileHeader") + } +} + +func TestFileHeaderRoundTrip(t *testing.T) { + fh := &FileHeader{ + Name: "foo.txt", + UncompressedSize: 987654321, + ModifiedTime: 1234, + ModifiedDate: 5678, + } + testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t) +} + +func TestFileHeaderRoundTrip64(t *testing.T) { + fh := &FileHeader{ + Name: "foo.txt", + UncompressedSize64: 9876543210, + ModifiedTime: 1234, + ModifiedDate: 5678, + } + testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t) +} + +type repeatedByte struct { + off int64 + b byte + n int64 +} + +// rleBuffer is a run-length-encoded byte buffer. +// It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt, +// allowing random-access reads. +type rleBuffer struct { + buf []repeatedByte +} + +func (r *rleBuffer) Size() int64 { + if len(r.buf) == 0 { + return 0 + } + last := &r.buf[len(r.buf)-1] + return last.off + last.n +} + +func (r *rleBuffer) Write(p []byte) (n int, err error) { + var rp *repeatedByte + if len(r.buf) > 0 { + rp = &r.buf[len(r.buf)-1] + // Fast path, if p is entirely the same byte repeated. 
+ if lastByte := rp.b; len(p) > 0 && p[0] == lastByte { + all := true + for _, b := range p { + if b != lastByte { + all = false + break + } + } + if all { + rp.n += int64(len(p)) + return len(p), nil + } + } + } + + for _, b := range p { + if rp == nil || rp.b != b { + r.buf = append(r.buf, repeatedByte{r.Size(), b, 1}) + rp = &r.buf[len(r.buf)-1] + } else { + rp.n++ + } + } + return len(p), nil +} + +func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) { + if len(p) == 0 { + return + } + skipParts := sort.Search(len(r.buf), func(i int) bool { + part := &r.buf[i] + return part.off+part.n > off + }) + parts := r.buf[skipParts:] + if len(parts) > 0 { + skipBytes := off - parts[0].off + for len(parts) > 0 { + part := parts[0] + for i := skipBytes; i < part.n; i++ { + if n == len(p) { + return + } + p[n] = part.b + n++ + } + parts = parts[1:] + skipBytes = 0 + } + } + if n != len(p) { + err = io.ErrUnexpectedEOF + } + return +} + +// Just testing the rleBuffer used in the Zip64 test above. Not used by the zip code. +func TestRLEBuffer(t *testing.T) { + b := new(rleBuffer) + var all []byte + writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"} + for _, w := range writes { + b.Write([]byte(w)) + all = append(all, w...) + } + if len(b.buf) != 10 { + t.Fatalf("len(b.buf) = %d; want 10", len(b.buf)) + } + + for i := 0; i < len(all); i++ { + for j := 0; j < len(all)-i; j++ { + buf := make([]byte, j) + n, err := b.ReadAt(buf, int64(i)) + if err != nil || n != len(buf) { + t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf)) + } + if !bytes.Equal(buf, all[i:i+j]) { + t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j]) + } + } + } +} + +// fakeHash32 is a dummy Hash32 that always returns 0. +type fakeHash32 struct { + hash.Hash32 +} + +func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil } +func (fakeHash32) Sum32() uint32 { return 0 } + +func TestZip64(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + const size = 1 << 32 // before the "END\n" part + buf := testZip64(t, size) + testZip64DirectoryRecordLength(buf, t) +} + +func TestZip64EdgeCase(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + // Test a zip file with uncompressed size 0xFFFFFFFF. + // That's the magic marker for a 64-bit file, so even though + // it fits in a 32-bit field we must use the 64-bit field. + // Go 1.5 and earlier got this wrong, + // writing an invalid zip file. + const size = 1<<32 - 1 - int64(len("END\n")) // before the "END\n" part + buf := testZip64(t, size) + testZip64DirectoryRecordLength(buf, t) +} + +func testZip64(t testing.TB, size int64) *rleBuffer { + const chunkSize = 1024 + chunks := int(size / chunkSize) + // write size bytes plus "END\n" to a zip file + buf := new(rleBuffer) + w := NewWriter(buf) + f, err := w.CreateHeader(&FileHeader{ + Name: "huge.txt", + Method: Store, + }) + if err != nil { + t.Fatal(err) + } + f.(*fileWriter).crc32 = fakeHash32{} + chunk := make([]byte, chunkSize) + for i := range chunk { + chunk[i] = '.' 
+ } + for i := 0; i < chunks; i++ { + _, err := f.Write(chunk) + if err != nil { + t.Fatal("write chunk:", err) + } + } + if frag := int(size % chunkSize); frag > 0 { + _, err := f.Write(chunk[:frag]) + if err != nil { + t.Fatal("write chunk:", err) + } + } + end := []byte("END\n") + _, err = f.Write(end) + if err != nil { + t.Fatal("write end:", err) + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read back zip file and check that we get to the end of it + r, err := NewReader(buf, int64(buf.Size())) + if err != nil { + t.Fatal("reader:", err) + } + f0 := r.File[0] + rc, err := f0.Open() + if err != nil { + t.Fatal("opening:", err) + } + rc.(*checksumReader).hash = fakeHash32{} + for i := 0; i < chunks; i++ { + _, err := io.ReadFull(rc, chunk) + if err != nil { + t.Fatal("read:", err) + } + } + if frag := int(size % chunkSize); frag > 0 { + _, err := io.ReadFull(rc, chunk[:frag]) + if err != nil { + t.Fatal("read:", err) + } + } + gotEnd, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal("read end:", err) + } + if !bytes.Equal(gotEnd, end) { + t.Errorf("End of zip64 archive %q, want %q", gotEnd, end) + } + err = rc.Close() + if err != nil { + t.Fatal("closing:", err) + } + if size+int64(len("END\n")) >= 1<<32-1 { + if got, want := f0.UncompressedSize, uint32(uint32max); got != want { + t.Errorf("UncompressedSize %#x, want %#x", got, want) + } + } + + if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want { + t.Errorf("UncompressedSize64 %#x, want %#x", got, want) + } + + return buf +} + +// Issue 9857 +func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) { + d := make([]byte, 1024) + if _, err := buf.ReadAt(d, buf.Size()-int64(len(d))); err != nil { + t.Fatal("read:", err) + } + + sigOff := findSignatureInBlock(d) + dirOff, err := findDirectory64End(buf, buf.Size()-int64(len(d))+int64(sigOff)) + if err != nil { + t.Fatal("findDirectory64End:", err) + } + + d = make([]byte, directory64EndLen) + if _, err := buf.ReadAt(d, dirOff); err != nil { + t.Fatal("read:", err) + } + + b := readBuf(d) + if sig := b.uint32(); sig != directory64EndSignature { + t.Fatalf("Expected directory64EndSignature (%d), got %d", directory64EndSignature, sig) + } + + size := b.uint64() + if size != directory64EndLen-12 { + t.Fatalf("Expected length of %d, got %d", directory64EndLen-12, size) + } +} + +func testInvalidHeader(h *FileHeader, t *testing.T) { + var buf bytes.Buffer + z := NewWriter(&buf) + + f, err := z.CreateHeader(h) + if err != nil { + t.Fatalf("error creating header: %v", err) + } + if _, err := f.Write([]byte("hi")); err != nil { + t.Fatalf("error writing content: %v", err) + } + if err := z.Close(); err != nil { + t.Fatalf("error closing zip writer: %v", err) + } + + b := buf.Bytes() + if _, err = NewReader(bytes.NewReader(b), int64(len(b))); err != ErrFormat { + t.Fatalf("got %v, expected ErrFormat", err) + } +} + +func testValidHeader(h *FileHeader, t *testing.T) { + var buf bytes.Buffer + z := NewWriter(&buf) + + f, err := z.CreateHeader(h) + if err != nil { + t.Fatalf("error creating header: %v", err) + } + if _, err := f.Write([]byte("hi")); err != nil { + t.Fatalf("error writing content: %v", err) + } + if err := z.Close(); err != nil { + t.Fatalf("error closing zip writer: %v", err) + } + + b := buf.Bytes() + zf, err := NewReader(bytes.NewReader(b), int64(len(b))) + if err != nil { + t.Fatalf("got %v, expected nil", err) + } + zh := zf.File[0].FileHeader + if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != 
uint64(len("hi")) { + t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi")) + } +} + +// Issue 4302. +func TestHeaderInvalidTagAndSize(t *testing.T) { + const timeFormat = "20060102T150405.000.txt" + + ts := time.Now() + filename := ts.Format(timeFormat) + + h := FileHeader{ + Name: filename, + Method: Deflate, + Extra: []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing + } + h.SetModTime(ts) + + testValidHeader(&h, t) +} + +func TestHeaderTooShort(t *testing.T) { + h := FileHeader{ + Name: "foo.txt", + Method: Deflate, + Extra: []byte{zip64ExtraId}, // missing size and second half of tag, but Extra is best-effort parsing + } + testValidHeader(&h, t) +} + +func TestHeaderIgnoredSize(t *testing.T) { + h := FileHeader{ + Name: "foo.txt", + Method: Deflate, + Extra: []byte{zip64ExtraId & 0xFF, zip64ExtraId >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted + } + testValidHeader(&h, t) +} + +// Issue 4393. It is valid to have an extra data header +// which contains no body. +func TestZeroLengthHeader(t *testing.T) { + h := FileHeader{ + Name: "extadata.txt", + Method: Deflate, + Extra: []byte{ + 85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5 + 85, 120, 0, 0, // tag 30805 size 0 + }, + } + testValidHeader(&h, t) +} + +// Just benchmarking how fast the Zip64 test above is. Not related to +// our zip performance, since the test above disabled CRC32 and flate. +func BenchmarkZip64Test(b *testing.B) { + for i := 0; i < b.N; i++ { + testZip64(b, 1<<26) + } +} diff --git a/vendor/github.com/klauspost/compress/zlib/example_test.go b/vendor/github.com/klauspost/compress/zlib/example_test.go new file mode 100644 index 00000000000..70408895ffd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/example_test.go @@ -0,0 +1,37 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib_test + +import ( + "bytes" + "compress/zlib" + "fmt" + "io" + "os" +) + +func ExampleNewWriter() { + var b bytes.Buffer + + w := zlib.NewWriter(&b) + w.Write([]byte("hello, world\n")) + w.Close() + fmt.Println(b.Bytes()) + // Output: [120 156 202 72 205 201 201 215 81 40 207 47 202 73 225 2 4 0 0 255 255 33 231 4 147] +} + +func ExampleNewReader() { + buff := []byte{120, 156, 202, 72, 205, 201, 201, 215, 81, 40, 207, + 47, 202, 73, 225, 2, 4, 0, 0, 255, 255, 33, 231, 4, 147} + b := bytes.NewReader(buff) + + r, err := zlib.NewReader(b) + if err != nil { + panic(err) + } + io.Copy(os.Stdout, r) + // Output: hello, world + r.Close() +} diff --git a/vendor/github.com/klauspost/compress/zlib/reader.go b/vendor/github.com/klauspost/compress/zlib/reader.go new file mode 100644 index 00000000000..d9091e83112 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/reader.go @@ -0,0 +1,183 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package zlib implements reading and writing of zlib format compressed data, +as specified in RFC 1950. + +The implementation provides filters that uncompress during reading +and compress during writing. 
For example, to write compressed data +to a buffer: + + var b bytes.Buffer + w := zlib.NewWriter(&b) + w.Write([]byte("hello, world\n")) + w.Close() + +and to read that data back: + + r, err := zlib.NewReader(&b) + io.Copy(os.Stdout, r) + r.Close() +*/ +package zlib + +import ( + "bufio" + "errors" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +const zlibDeflate = 8 + +var ( + // ErrChecksum is returned when reading ZLIB data that has an invalid checksum. + ErrChecksum = errors.New("zlib: invalid checksum") + // ErrDictionary is returned when reading ZLIB data that has an invalid dictionary. + ErrDictionary = errors.New("zlib: invalid dictionary") + // ErrHeader is returned when reading ZLIB data that has an invalid header. + ErrHeader = errors.New("zlib: invalid header") +) + +type reader struct { + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + err error + scratch [4]byte +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// NewReader creates a new ReadCloser. +// Reads from the returned ReadCloser read and decompress data from r. +// If r does not implement io.ByteReader, the decompressor may read more +// data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser when done. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) (io.ReadCloser, error) { + return NewReaderDict(r, nil) +} + +// NewReaderDict is like NewReader but uses a preset dictionary. +// NewReaderDict ignores the dictionary if the compressed data does not refer to it. +// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. +// +// The ReadCloser returned by NewReaderDict also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { + z := new(reader) + err := z.Reset(r, dict) + if err != nil { + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + + var n int + n, z.err = z.decompressor.Read(p) + z.digest.Write(p[0:n]) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum. + if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return n, z.err + } + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != z.digest.Sum32() { + z.err = ErrChecksum + return n, z.err + } + return n, io.EOF +} + +// Calling Close does not close the wrapped io.Reader originally passed to NewReader. +// In order for the ZLIB checksum to be verified, the reader must be +// fully consumed until the io.EOF. 
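+// Close itself does not verify the checksum; verification happens in Read once
+// the underlying stream reaches io.EOF.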
+func (z *reader) Close() error { + if z.err != nil && z.err != io.EOF { + return z.err + } + z.err = z.decompressor.Close() + return z.err +} + +func (z *reader) Reset(r io.Reader, dict []byte) error { + *z = reader{decompressor: z.decompressor, digest: z.digest} + if fr, ok := r.(flate.Reader); ok { + z.r = fr + } else { + z.r = bufio.NewReader(r) + } + + // Read the header (RFC 1950 section 2.2.). + _, z.err = io.ReadFull(z.r, z.scratch[0:2]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) + if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { + z.err = ErrHeader + return z.err + } + haveDict := z.scratch[1]&0x20 != 0 + if haveDict { + _, z.err = io.ReadFull(z.r, z.scratch[0:4]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != adler32.Checksum(dict) { + z.err = ErrDictionary + return z.err + } + } + + if z.decompressor == nil { + if haveDict { + z.decompressor = flate.NewReaderDict(z.r, dict) + } else { + z.decompressor = flate.NewReader(z.r) + } + } else { + z.decompressor.(flate.Resetter).Reset(z.r, dict) + } + + if z.digest != nil { + z.digest.Reset() + } else { + z.digest = adler32.New() + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zlib/reader_test.go b/vendor/github.com/klauspost/compress/zlib/reader_test.go new file mode 100644 index 00000000000..7e27aecb47d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/reader_test.go @@ -0,0 +1,179 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package zlib + +import ( + "bytes" + "io" + "testing" +) + +type zlibTest struct { + desc string + raw string + compressed []byte + dict []byte + err error +} + +// Compare-to-golden test data was generated by the ZLIB example program at +// http://www.zlib.net/zpipe.c + +var zlibTests = []zlibTest{ + { + "truncated empty", + "", + []byte{}, + nil, + io.ErrUnexpectedEOF, + }, + { + "truncated dict", + "", + []byte{0x78, 0xbb}, + []byte{0x00}, + io.ErrUnexpectedEOF, + }, + { + "truncated checksum", + "", + []byte{0x78, 0xbb, 0x00, 0x01, 0x00, 0x01, 0xca, 0x48, + 0xcd, 0xc9, 0xc9, 0xd7, 0x51, 0x28, 0xcf, 0x2f, + 0xca, 0x49, 0x01, 0x04, 0x00, 0x00, 0xff, 0xff, + }, + []byte{0x00}, + io.ErrUnexpectedEOF, + }, + { + "empty", + "", + []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01}, + nil, + nil, + }, + { + "goodbye", + "goodbye, world", + []byte{ + 0x78, 0x9c, 0x4b, 0xcf, 0xcf, 0x4f, 0x49, 0xaa, + 0x4c, 0xd5, 0x51, 0x28, 0xcf, 0x2f, 0xca, 0x49, + 0x01, 0x00, 0x28, 0xa5, 0x05, 0x5e, + }, + nil, + nil, + }, + { + "bad header", + "", + []byte{0x78, 0x9f, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01}, + nil, + ErrHeader, + }, + { + "bad checksum", + "", + []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0xff}, + nil, + ErrChecksum, + }, + { + "not enough data", + "", + []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00}, + nil, + io.ErrUnexpectedEOF, + }, + { + "excess data is silently ignored", + "", + []byte{ + 0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x78, 0x9c, 0xff, + }, + nil, + nil, + }, + { + "dictionary", + "Hello, World!\n", + []byte{ + 0x78, 0xbb, 0x1c, 0x32, 0x04, 0x27, 0xf3, 0x00, + 0xb1, 0x75, 0x20, 0x1c, 0x45, 0x2e, 0x00, 0x24, + 0x12, 0x04, 0x74, + }, + []byte{ + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x0a, + }, + nil, + }, + { + "wrong dictionary", + "", + []byte{ + 0x78, 0xbb, 0x1c, 0x32, 0x04, 0x27, 0xf3, 0x00, + 0xb1, 0x75, 0x20, 0x1c, 0x45, 0x2e, 0x00, 0x24, + 0x12, 0x04, 0x74, + }, + []byte{ + 0x48, 0x65, 0x6c, 0x6c, + }, + ErrDictionary, + }, + { + "truncated zlib stream amid raw-block", + "hello", + []byte{ + 0x78, 0x9c, 0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f, + }, + nil, + io.ErrUnexpectedEOF, + }, + { + "truncated zlib stream amid fixed-block", + "He", + []byte{ + 0x78, 0x9c, 0xf2, 0x48, 0xcd, + }, + nil, + io.ErrUnexpectedEOF, + }, +} + +func TestDecompressor(t *testing.T) { + b := new(bytes.Buffer) + for _, tt := range zlibTests { + in := bytes.NewReader(tt.compressed) + zr, err := NewReaderDict(in, tt.dict) + if err != nil { + if err != tt.err { + t.Errorf("%s: NewReader: %s", tt.desc, err) + } + continue + } + defer zr.Close() + + // Read and verify correctness of data. + b.Reset() + n, err := io.Copy(b, zr) + if err != nil { + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.desc, err, tt.err) + } + continue + } + s := b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.desc, n, s, len(tt.raw), tt.raw) + } + + // Check for sticky errors. + if n, err := zr.Read([]byte{0}); n != 0 || err != io.EOF { + t.Errorf("%s: Read() = (%d, %v), want (0, io.EOF)", tt.desc, n, err) + } + if err := zr.Close(); err != nil { + t.Errorf("%s: Close() = %v, want nil", tt.desc, err) + } + } +} diff --git a/vendor/github.com/klauspost/compress/zlib/writer.go b/vendor/github.com/klauspost/compress/zlib/writer.go new file mode 100644 index 00000000000..605816ba4f3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/writer.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "fmt" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/zlib" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + w io.Writer + level int + dict []byte + compressor *flate.Writer + digest hash.Hash32 + err error + scratch [4]byte + wroteHeader bool +} + +// NewWriter creates a new Writer. +// Writes to the returned Writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevelDict(w, DefaultCompression, nil) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, HuffmanOnly +// or any integer value between BestSpeed and BestCompression inclusive. +// The error returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + return NewWriterLevelDict(w, level, nil) +} + +// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to +// compress with. +// +// The dictionary may be nil. If not, its contents should not be modified until +// the Writer is closed. +func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("zlib: invalid compression level: %d", level) + } + return &Writer{ + w: w, + level: level, + dict: dict, + }, nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing +// to w. +func (z *Writer) Reset(w io.Writer) { + z.w = w + // z.level and z.dict left unchanged. + if z.compressor != nil { + z.compressor.Reset(w) + } + if z.digest != nil { + z.digest.Reset() + } + z.err = nil + z.scratch = [4]byte{} + z.wroteHeader = false +} + +// writeHeader writes the ZLIB header. +func (z *Writer) writeHeader() (err error) { + z.wroteHeader = true + // ZLIB has a two-byte header (as documented in RFC 1950). + // The first four bits is the CINFO (compression info), which is 7 for the default deflate window size. + // The next four bits is the CM (compression method), which is 8 for deflate. + z.scratch[0] = 0x78 + // The next two bits is the FLEVEL (compression level). The four values are: + // 0=fastest, 1=fast, 2=default, 3=best. + // The next bit, FDICT, is set if a dictionary is given. + // The final five FCHECK bits form a mod-31 checksum. 
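+	// For example, at the default level: scratch[0] = 0x78 and FLEVEL = 2 gives
+	// scratch[1] = 0x80; 0x7880 = 30848, 30848 mod 31 = 3, so FCHECK = 28 (0x1c)
+	// and the familiar two-byte header 0x78 0x9c results.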
+ switch z.level { + case -2, 0, 1: + z.scratch[1] = 0 << 6 + case 2, 3, 4, 5: + z.scratch[1] = 1 << 6 + case 6, -1: + z.scratch[1] = 2 << 6 + case 7, 8, 9: + z.scratch[1] = 3 << 6 + default: + panic("unreachable") + } + if z.dict != nil { + z.scratch[1] |= 1 << 5 + } + z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31) + if _, err = z.w.Write(z.scratch[0:2]); err != nil { + return err + } + if z.dict != nil { + // The next four bytes are the Adler-32 checksum of the dictionary. + checksum := adler32.Checksum(z.dict) + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + if _, err = z.w.Write(z.scratch[0:4]); err != nil { + return err + } + } + if z.compressor == nil { + // Initialize deflater unless the Writer is being reused + // after a Reset call. + z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict) + if err != nil { + return err + } + z.digest = adler32.New() + } + return nil +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed or +// explicitly flushed. +func (z *Writer) Write(p []byte) (n int, err error) { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + n, err = z.compressor.Write(p) + if err != nil { + z.err = err + return + } + z.digest.Write(p) + return +} + +// Flush flushes the Writer to its underlying io.Writer. +func (z *Writer) Flush() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + checksum := z.digest.Sum32() + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + _, z.err = z.w.Write(z.scratch[0:4]) + return z.err +} diff --git a/vendor/github.com/klauspost/compress/zlib/writer_test.go b/vendor/github.com/klauspost/compress/zlib/writer_test.go new file mode 100644 index 00000000000..5f4af641a4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/writer_test.go @@ -0,0 +1,212 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "testing" +) + +var filenames = []string{ + "../testdata/gettysburg.txt", + "../testdata/e.txt", + "../testdata/pi.txt", +} + +var data = []string{ + "test a reasonable sized string that can be compressed", +} + +// Tests that compressing and then decompressing the given file at the given compression level and dictionary +// yields equivalent bytes to the original file. +func testFileLevelDict(t *testing.T, fn string, level int, d string) { + // Read the file, as golden output. 
+ golden, err := os.Open(fn) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + defer golden.Close() + b0, err0 := ioutil.ReadAll(golden) + if err0 != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err0) + return + } + testLevelDict(t, fn, b0, level, d) +} + +func testLevelDict(t *testing.T, fn string, b0 []byte, level int, d string) { + // Make dictionary, if given. + var dict []byte + if d != "" { + dict = []byte(d) + } + + // Push data through a pipe that compresses at the write end, and decompresses at the read end. + piper, pipew := io.Pipe() + defer piper.Close() + go func() { + defer pipew.Close() + zlibw, err := NewWriterLevelDict(pipew, level, dict) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + defer zlibw.Close() + _, err = zlibw.Write(b0) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + }() + zlibr, err := NewReaderDict(piper, dict) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + defer zlibr.Close() + + // Compare the decompressed data. + b1, err1 := ioutil.ReadAll(zlibr) + if err1 != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err1) + return + } + if len(b0) != len(b1) { + t.Errorf("%s (level=%d, dict=%q): length mismatch %d versus %d", fn, level, d, len(b0), len(b1)) + return + } + for i := 0; i < len(b0); i++ { + if b0[i] != b1[i] { + t.Errorf("%s (level=%d, dict=%q): mismatch at %d, 0x%02x versus 0x%02x\n", fn, level, d, i, b0[i], b1[i]) + return + } + } +} + +func testFileLevelDictReset(t *testing.T, fn string, level int, dict []byte) { + var b0 []byte + var err error + if fn != "" { + b0, err = ioutil.ReadFile(fn) + if err != nil { + t.Errorf("%s (level=%d): %v", fn, level, err) + return + } + } + + // Compress once. + buf := new(bytes.Buffer) + var zlibw *Writer + if dict == nil { + zlibw, err = NewWriterLevel(buf, level) + } else { + zlibw, err = NewWriterLevelDict(buf, level, dict) + } + if err == nil { + _, err = zlibw.Write(b0) + } + if err == nil { + err = zlibw.Close() + } + if err != nil { + t.Errorf("%s (level=%d): %v", fn, level, err) + return + } + out := buf.String() + + // Reset and compress again. + buf2 := new(bytes.Buffer) + zlibw.Reset(buf2) + _, err = zlibw.Write(b0) + if err == nil { + err = zlibw.Close() + } + if err != nil { + t.Errorf("%s (level=%d): %v", fn, level, err) + return + } + out2 := buf2.String() + + if out2 != out { + t.Errorf("%s (level=%d): different output after reset (got %d bytes, expected %d", + fn, level, len(out2), len(out)) + } +} + +func TestWriter(t *testing.T) { + for i, s := range data { + b := []byte(s) + tag := fmt.Sprintf("#%d", i) + testLevelDict(t, tag, b, DefaultCompression, "") + testLevelDict(t, tag, b, NoCompression, "") + testLevelDict(t, tag, b, HuffmanOnly, "") + for level := BestSpeed; level <= BestCompression; level++ { + testLevelDict(t, tag, b, level, "") + } + } +} + +func TestWriterBig(t *testing.T) { + for _, fn := range filenames { + testFileLevelDict(t, fn, DefaultCompression, "") + testFileLevelDict(t, fn, NoCompression, "") + testFileLevelDict(t, fn, HuffmanOnly, "") + for level := BestSpeed; level <= BestCompression; level++ { + testFileLevelDict(t, fn, level, "") + } + } +} + +func TestWriterDict(t *testing.T) { + const dictionary = "0123456789." 
+ for _, fn := range filenames { + testFileLevelDict(t, fn, DefaultCompression, dictionary) + testFileLevelDict(t, fn, NoCompression, dictionary) + testFileLevelDict(t, fn, HuffmanOnly, dictionary) + for level := BestSpeed; level <= BestCompression; level++ { + testFileLevelDict(t, fn, level, dictionary) + } + } +} + +func TestWriterReset(t *testing.T) { + const dictionary = "0123456789." + for _, fn := range filenames { + testFileLevelDictReset(t, fn, NoCompression, nil) + testFileLevelDictReset(t, fn, DefaultCompression, nil) + testFileLevelDictReset(t, fn, HuffmanOnly, nil) + testFileLevelDictReset(t, fn, NoCompression, []byte(dictionary)) + testFileLevelDictReset(t, fn, DefaultCompression, []byte(dictionary)) + testFileLevelDictReset(t, fn, HuffmanOnly, []byte(dictionary)) + if testing.Short() { + break + } + for level := BestSpeed; level <= BestCompression; level++ { + testFileLevelDictReset(t, fn, level, nil) + } + } +} + +func TestWriterDictIsUsed(t *testing.T) { + var input = []byte("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.") + var buf bytes.Buffer + compressor, err := NewWriterLevelDict(&buf, BestCompression, input) + if err != nil { + t.Errorf("error in NewWriterLevelDict: %s", err) + return + } + compressor.Write(input) + compressor.Close() + const expectedMaxSize = 25 + output := buf.Bytes() + if len(output) > expectedMaxSize { + t.Errorf("result too large (got %d, want <= %d bytes). Is the dictionary being used?", len(output), expectedMaxSize) + } +} diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml new file mode 100644 index 00000000000..630192d597b --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.travis.yml @@ -0,0 +1,23 @@ +language: go + +sudo: false + +os: + - linux + - osx +go: + - 1.8.x + - 1.9.x + - 1.10.x + - master + +script: + - go vet ./... + - go test -v ./... + - go test -race ./... + - diff <(gofmt -d .) <("") + +matrix: + allow_failures: + - go: 'master' + fast_finish: true diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt new file mode 100644 index 00000000000..2ef4714f716 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt @@ -0,0 +1,35 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE new file mode 100644 index 00000000000..5cec7ee949b --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md new file mode 100644 index 00000000000..b2b6bee879a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/README.md @@ -0,0 +1,145 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. + +CPU features are detected on startup, and kept for fast access through the life of the application. +Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
+ +Package home: https://github.com/klauspost/cpuid + +[![GoDoc][1]][2] [![Build Status][3]][4] + +[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg +[2]: https://godoc.org/github.com/klauspost/cpuid +[3]: https://travis-ci.org/klauspost/cpuid.svg +[4]: https://travis-ci.org/klauspost/cpuid + +# features +## CPU Instructions +* **CMOV** (i686 CMOV) +* **NX** (NX (No-Execute) bit) +* **AMD3DNOW** (AMD 3DNOW) +* **AMD3DNOWEXT** (AMD 3DNowExt) +* **MMX** (standard MMX) +* **MMXEXT** (SSE integer functions or AMD MMX ext) +* **SSE** (SSE functions) +* **SSE2** (P4 SSE functions) +* **SSE3** (Prescott SSE3 functions) +* **SSSE3** (Conroe SSSE3 functions) +* **SSE4** (Penryn SSE4.1 functions) +* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) +* **SSE42** (Nehalem SSE4.2 functions) +* **AVX** (AVX functions) +* **AVX2** (AVX2 functions) +* **FMA3** (Intel FMA 3) +* **FMA4** (Bulldozer FMA4 functions) +* **XOP** (Bulldozer XOP functions) +* **F16C** (Half-precision floating-point conversion) +* **BMI1** (Bit Manipulation Instruction Set 1) +* **BMI2** (Bit Manipulation Instruction Set 2) +* **TBM** (AMD Trailing Bit Manipulation) +* **LZCNT** (LZCNT instruction) +* **POPCNT** (POPCNT instruction) +* **AESNI** (Advanced Encryption Standard New Instructions) +* **CLMUL** (Carry-less Multiplication) +* **HTT** (Hyperthreading (enabled)) +* **HLE** (Hardware Lock Elision) +* **RTM** (Restricted Transactional Memory) +* **RDRAND** (RDRAND instruction is available) +* **RDSEED** (RDSEED instruction is available) +* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +* **SHA** (Intel SHA Extensions) +* **AVX512F** (AVX-512 Foundation) +* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) +* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) +* **AVX512PF** (AVX-512 Prefetch Instructions) +* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) +* **AVX512CD** (AVX-512 Conflict Detection Instructions) +* **AVX512BW** (AVX-512 Byte and Word Instructions) +* **AVX512VL** (AVX-512 Vector Length Extensions) +* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) +* **MPX** (Intel MPX (Memory Protection Extensions)) +* **ERMS** (Enhanced REP MOVSB/STOSB) +* **RDTSCP** (RDTSCP Instruction) +* **CX16** (CMPXCHG16B Instruction) +* **SGX** (Software Guard Extensions, with activation details) + +## Performance +* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. +* **SSE2SLOW** (SSE2 is supported, but usually not faster) +* **SSE3SLOW** (SSE3 is supported, but usually not faster) +* **ATOM** (Atom processor, some SSSE3 instructions are slower) +* **Cache line** (Probable size of a cache line). +* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. 
+ +## Cpu Vendor/VM +* **Intel** +* **AMD** +* **VIA** +* **Transmeta** +* **NSC** +* **KVM** (Kernel-based Virtual Machine) +* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) +* **VMware** +* **XenHVM** + +# installing + +```go get github.com/klauspost/cpuid``` + +# example + +```Go +package main + +import ( + "fmt" + "github.com/klauspost/cpuid" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", cpuid.CPU.BrandName) + fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) + fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) + fmt.Println("Features:", cpuid.CPU.Features) + fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) + fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") + + // Test if we have a specific feature: + if cpuid.CPU.SSE() { + fmt.Println("We have Streaming SIMD Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz +PhysicalCores: 2 +ThreadsPerCore: 2 +LogicalCores: 4 +Family 6 Model: 42 +Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL +Cacheline bytes: 64 +We have Streaming SIMD Extensions +``` + +# private package + +In the "private" folder you can find an autogenerated version of the library you can include in your own packages. + +For this purpose all exports are removed, and functions and constants are lowercased. + +This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. + +# license + +This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go new file mode 100644 index 00000000000..60c681bed24 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid.go @@ -0,0 +1,1040 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// Package cpuid provides information about the CPU running the current program. +// +// CPU features are detected on startup, and kept for fast access through the life of the application. +// Currently x86 / x64 (AMD64) is supported. +// +// You can access the CPU information by accessing the shared CPU variable of the cpuid library. +// +// Package home: https://github.com/klauspost/cpuid +package cpuid + +import "strings" + +// Vendor is a representation of a CPU vendor. 
+type Vendor int + +const ( + Other Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM +) + +const ( + CMOV = 1 << iota // i686 CMOV + NX // NX (No-Execute) bit + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSSE3 // Conroe SSSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSE42 // Nehalem SSE4.2 functions + AVX // AVX functions + AVX2 // AVX2 functions + FMA3 // Intel FMA 3 + FMA4 // Bulldozer FMA4 functions + XOP // Bulldozer XOP functions + F16C // Half-precision floating-point conversion + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + TBM // AMD Trailing Bit Manipulation + LZCNT // LZCNT instruction + POPCNT // POPCNT instruction + AESNI // Advanced Encryption Standard New Instructions + CLMUL // Carry-less Multiplication + HTT // Hyperthreading (enabled) + HLE // Hardware Lock Elision + RTM // Restricted Transactional Memory + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA // Intel SHA Extensions + AVX512F // AVX-512 Foundation + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512BW // AVX-512 Byte and Word Instructions + AVX512VL // AVX-512 Vector Length Extensions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + MPX // Intel MPX (Memory Protection Extensions) + ERMS // Enhanced REP MOVSB/STOSB + RDTSCP // RDTSCP Instruction + CX16 // CMPXCHG16B Instruction + SGX // Software Guard Extensions + IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + STIBP // Single Thread Indirect Branch Predictors + + // Performance indicators + SSE2SLOW // SSE2 is supported, but usually not faster + SSE3SLOW // SSE3 is supported, but usually not faster + ATOM // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[Flags]string{ + CMOV: "CMOV", // i686 CMOV + NX: "NX", // NX (No-Execute) bit + AMD3DNOW: "AMD3DNOW", // AMD 3DNOW + AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt + MMX: "MMX", // Standard MMX + MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext + SSE: "SSE", // SSE functions + SSE2: "SSE2", // P4 SSE2 functions + SSE3: "SSE3", // Prescott SSE3 functions + SSSE3: "SSSE3", // Conroe SSSE3 functions + SSE4: "SSE4.1", // Penryn SSE4.1 functions + SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + SSE42: "SSE4.2", // Nehalem SSE4.2 functions + AVX: "AVX", // AVX functions + AVX2: "AVX2", // AVX functions + FMA3: "FMA3", // Intel FMA 3 + FMA4: "FMA4", // Bulldozer FMA4 functions + XOP: "XOP", // Bulldozer XOP functions + F16C: "F16C", // Half-precision floating-point conversion + BMI1: "BMI1", // Bit Manipulation Instruction Set 1 + BMI2: "BMI2", // Bit Manipulation Instruction Set 2 + TBM: "TBM", // AMD Trailing Bit Manipulation + LZCNT: "LZCNT", // LZCNT instruction + POPCNT: "POPCNT", // POPCNT instruction + AESNI: "AESNI", // Advanced Encryption Standard New 
Instructions + CLMUL: "CLMUL", // Carry-less Multiplication + HTT: "HTT", // Hyperthreading (enabled) + HLE: "HLE", // Hardware Lock Elision + RTM: "RTM", // Restricted Transactional Memory + RDRAND: "RDRAND", // RDRAND instruction is available + RDSEED: "RDSEED", // RDSEED instruction is available + ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA: "SHA", // Intel SHA Extensions + AVX512F: "AVX512F", // AVX-512 Foundation + AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions + AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions + AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions + AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions + AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + MPX: "MPX", // Intel MPX (Memory Protection Extensions) + ERMS: "ERMS", // Enhanced REP MOVSB/STOSB + RDTSCP: "RDTSCP", // RDTSCP Instruction + CX16: "CX16", // CMPXCHG16B Instruction + SGX: "SGX", // Software Guard Extensions + IBPB: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier + STIBP: "STIBP", // Single Thread Indirect Branch Predictors + + // Performance indicators + SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster + SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster + ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + Features Flags // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected + } + SGX SGXSupport + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. 
+func Detect() { + CPU.maxFunc = maxFunctionID() + CPU.maxExFunc = maxExtendedFunction() + CPU.BrandName = brandName() + CPU.CacheLine = cacheLine() + CPU.Family, CPU.Model = familyModel() + CPU.Features = support() + CPU.SGX = hasSGX(CPU.Features&SGX != 0) + CPU.ThreadsPerCore = threadsPerCore() + CPU.LogicalCores = logicalCores() + CPU.PhysicalCores = physicalCores() + CPU.VendorID = vendorID() + CPU.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c CPUInfo) Cmov() bool { + return c.Features&CMOV != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! instructions +func (c CPUInfo) Amd3dnow() bool { + return c.Features&AMD3DNOW != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions +func (c CPUInfo) Amd3dnowExt() bool { + return c.Features&AMD3DNOWEXT != 0 +} + +// MMX indicates support of MMX instructions +func (c CPUInfo) MMX() bool { + return c.Features&MMX != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c CPUInfo) MMXExt() bool { + return c.Features&MMXEXT != 0 +} + +// SSE indicates support of SSE instructions +func (c CPUInfo) SSE() bool { + return c.Features&SSE != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c CPUInfo) SSE2() bool { + return c.Features&SSE2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c CPUInfo) SSE3() bool { + return c.Features&SSE3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c CPUInfo) SSSE3() bool { + return c.Features&SSSE3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c CPUInfo) SSE4() bool { + return c.Features&SSE4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c CPUInfo) SSE42() bool { + return c.Features&SSE42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c CPUInfo) AVX() bool { + return c.Features&AVX != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c CPUInfo) AVX2() bool { + return c.Features&AVX2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c CPUInfo) FMA3() bool { + return c.Features&FMA3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c CPUInfo) FMA4() bool { + return c.Features&FMA4 != 0 +} + +// XOP indicates support of XOP instructions +func (c CPUInfo) XOP() bool { + return c.Features&XOP != 0 +} + +// F16C indicates support of F16C instructions +func (c CPUInfo) F16C() bool { + return c.Features&F16C != 0 +} + +// BMI1 indicates support of BMI1 instructions +func (c CPUInfo) BMI1() bool { + return c.Features&BMI1 != 0 +} + +// BMI2 indicates support of BMI2 instructions +func (c CPUInfo) BMI2() bool { + return c.Features&BMI2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c CPUInfo) TBM() bool { + return c.Features&TBM != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c CPUInfo) Lzcnt() bool { + return c.Features&LZCNT != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c CPUInfo) Popcnt() bool { + return c.Features&POPCNT != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c CPUInfo) HTT() bool { + return c.Features&HTT != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c CPUInfo) SSE2Slow() bool { + return c.Features&SSE2SLOW != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor 
+func (c CPUInfo) SSE3Slow() bool { + return c.Features&SSE3SLOW != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c CPUInfo) AesNi() bool { + return c.Features&AESNI != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c CPUInfo) Clmul() bool { + return c.Features&CLMUL != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c CPUInfo) NX() bool { + return c.Features&NX != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c CPUInfo) SSE4A() bool { + return c.Features&SSE4A != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c CPUInfo) HLE() bool { + return c.Features&HLE != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c CPUInfo) RTM() bool { + return c.Features&RTM != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c CPUInfo) Rdrand() bool { + return c.Features&RDRAND != 0 +} + +// Rdseed indicates support of RDSEED instruction is available +func (c CPUInfo) Rdseed() bool { + return c.Features&RDSEED != 0 +} + +// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) +func (c CPUInfo) ADX() bool { + return c.Features&ADX != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c CPUInfo) SHA() bool { + return c.Features&SHA != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c CPUInfo) AVX512F() bool { + return c.Features&AVX512F != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c CPUInfo) AVX512DQ() bool { + return c.Features&AVX512DQ != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c CPUInfo) AVX512IFMA() bool { + return c.Features&AVX512IFMA != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c CPUInfo) AVX512PF() bool { + return c.Features&AVX512PF != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c CPUInfo) AVX512ER() bool { + return c.Features&AVX512ER != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c CPUInfo) AVX512CD() bool { + return c.Features&AVX512CD != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c CPUInfo) AVX512BW() bool { + return c.Features&AVX512BW != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c CPUInfo) AVX512VL() bool { + return c.Features&AVX512VL != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c CPUInfo) AVX512VBMI() bool { + return c.Features&AVX512VBMI != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c CPUInfo) MPX() bool { + return c.Features&MPX != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c CPUInfo) ERMS() bool { + return c.Features&ERMS != 0 +} + +// RDTSCP Instruction is available. +func (c CPUInfo) RDTSCP() bool { + return c.Features&RDTSCP != 0 +} + +// CX16 indicates if CMPXCHG16B instruction is available. +func (c CPUInfo) CX16() bool { + return c.Features&CX16 != 0 +} + +// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. +// So TSX simply checks that. 
+func (c CPUInfo) TSX() bool { + return c.Features&(HLE|RTM) == HLE|RTM +} + +// Atom indicates an Atom processor +func (c CPUInfo) Atom() bool { + return c.Features&ATOM != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c CPUInfo) Intel() bool { + return c.VendorID == Intel +} + +// AMD returns true if vendor is recognized as AMD +func (c CPUInfo) AMD() bool { + return c.VendorID == AMD +} + +// Transmeta returns true if vendor is recognized as Transmeta +func (c CPUInfo) Transmeta() bool { + return c.VendorID == Transmeta +} + +// NSC returns true if vendor is recognized as National Semiconductor +func (c CPUInfo) NSC() bool { + return c.VendorID == NSC +} + +// VIA returns true if vendor is recognized as VIA +func (c CPUInfo) VIA() bool { + return c.VendorID == VIA +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c CPUInfo) RTCounter() uint64 { + if !c.RDTSCP() { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c CPUInfo) Ia32TscAux() uint32 { + if !c.RDTSCP() { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c CPUInfo) LogicalCPU() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. This is only a hint, and will very likely +// have many false negatives. +func (c CPUInfo) VM() bool { + switch c.VendorID { + case MSVM, KVM, VMware, XenHVM: + return true + } + return false +} + +// Flags contains detected cpu features and caracteristics +type Flags uint64 + +// String returns a string representation of the detected +// CPU features. +func (f Flags) String() string { + return strings.Join(f.Strings(), ",") +} + +// Strings returns and array of the detected features. 
+func (f Flags) Strings() []string { + s := support() + r := make([]string, 0, 20) + for i := uint(0); i < 64; i++ { + key := Flags(1 << i) + val := flagNames[key] + if s&key != 0 { + r = append(r, val) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + if mfi < 0x4 || vendorID() != Intel { + return 1 + } + + if mfi < 0xb { + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + switch vendorID() { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case AMD: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (int, int) { + if maxFunctionID() < 0x1 { + return 0, 0 + } + eax, _, _, _ := cpuid(1) + family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) + model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) + return int(family), int(model) +} + +func physicalCores() int { + switch vendorID() { + case Intel: + return logicalCores() / threadsPerCore() + case AMD: + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + return int(c&0xff) + 1 + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]Vendor{ + "AMDisbetter!": AMD, + "AuthenticAMD": AMD, + "CentaurHauls": VIA, + "GenuineIntel": Intel, + "TransmetaCPU": Transmeta, + "GenuineTMx86": Transmeta, + "Geode by NSC": NSC, + "VIA VIA VIA ": VIA, + "KVMKVMKVMKVM": KVM, + "Microsoft Hv": MSVM, + "VMwareVMware": VMware, + "XenVMMXenVMM": XenHVM, +} + +func vendorID() Vendor { + _, b, c, d := cpuid(0) + v := valAsString(b, d, c) + vend, ok := vendorMapping[string(v)] + if !ok { + return Other + } + return vend +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *CPUInfo) cacheSize() { + c.Cache.L1D = -1 + c.Cache.L1I = -1 + c.Cache.L2 = -1 + c.Cache.L3 = -1 + vendor := vendorID() + switch vendor { + case Intel: + if maxFunctionID() < 4 { + return + } + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := 
cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.Cache.L1D = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.Cache.L1I = size + } else { + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + case AMD: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +type SGXSupport struct { + Available bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 +} + +func hasSGX(available bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + + return +} + +func support() Flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= CMOV + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 25)) != 0 { + rval |= MMXEXT + } + if (d & (1 << 25)) != 0 { + rval |= SSE + } + if (d & (1 << 26)) != 0 { + rval |= SSE2 + } + if (c & 1) != 0 { + rval |= SSE3 + } + if (c & 0x00000200) != 0 { + rval |= SSSE3 + } + if (c & 0x00080000) != 0 { + rval |= SSE4 + } + if (c & 0x00100000) != 0 { + rval |= SSE42 + } + if (c & (1 << 25)) != 0 { + rval |= AESNI + } + if (c & (1 << 1)) != 0 { + rval |= CLMUL + } + if c&(1<<23) != 0 { + rval |= POPCNT + } + if c&(1<<30) != 0 { + rval |= RDRAND + } + if c&(1<<29) != 0 { + rval |= F16C + } + if c&(1<<13) != 0 { + rval |= CX16 + } + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + rval |= HTT + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= AVX + if (c & 0x00001000) != 0 { + rval |= FMA3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
+ if mfi >= 7 { + _, ebx, ecx, edx := cpuidex(7, 0) + if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { + rval |= AVX2 + } + if (ebx & 0x00000008) != 0 { + rval |= BMI1 + if (ebx & 0x00000100) != 0 { + rval |= BMI2 + } + } + if ebx&(1<<2) != 0 { + rval |= SGX + } + if ebx&(1<<4) != 0 { + rval |= HLE + } + if ebx&(1<<9) != 0 { + rval |= ERMS + } + if ebx&(1<<11) != 0 { + rval |= RTM + } + if ebx&(1<<14) != 0 { + rval |= MPX + } + if ebx&(1<<18) != 0 { + rval |= RDSEED + } + if ebx&(1<<19) != 0 { + rval |= ADX + } + if ebx&(1<<29) != 0 { + rval |= SHA + } + if edx&(1<<26) != 0 { + rval |= IBPB + } + if edx&(1<<27) != 0 { + rval |= STIBP + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= AVX512F + } + if ebx&(1<<17) != 0 { + rval |= AVX512DQ + } + if ebx&(1<<21) != 0 { + rval |= AVX512IFMA + } + if ebx&(1<<26) != 0 { + rval |= AVX512PF + } + if ebx&(1<<27) != 0 { + rval |= AVX512ER + } + if ebx&(1<<28) != 0 { + rval |= AVX512CD + } + if ebx&(1<<30) != 0 { + rval |= AVX512BW + } + if ebx&(1<<31) != 0 { + rval |= AVX512VL + } + // ecx + if ecx&(1<<1) != 0 { + rval |= AVX512VBMI + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= LZCNT + rval |= POPCNT + } + if (d & (1 << 31)) != 0 { + rval |= AMD3DNOW + } + if (d & (1 << 30)) != 0 { + rval |= AMD3DNOWEXT + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 22)) != 0 { + rval |= MMXEXT + } + if (c & (1 << 6)) != 0 { + rval |= SSE4A + } + if d&(1<<20) != 0 { + rval |= NX + } + if d&(1<<27) != 0 { + rval |= RDTSCP + } + + /* Allow for selectively disabling SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != Intel && + rval&SSE2 != 0 && (c&0x00000040) == 0 { + rval |= SSE2SLOW + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if (rval & AVX) != 0 { + if (c & 0x00000800) != 0 { + rval |= XOP + } + if (c & 0x00010000) != 0 { + rval |= FMA4 + } + } + + if vendorID() == Intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & SSE2) != 0 { + rval |= SSE2SLOW + } + if (rval & SSE3) != 0 { + rval |= SSE3SLOW + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. 
*/ + if family == 6 && model == 28 { + rval |= ATOM + } + } + } + return Flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s new file mode 100644 index 00000000000..4d731711e48 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_386.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s new file mode 100644 index 00000000000..3c1d60e4221 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_test.go b/vendor/github.com/klauspost/cpuid/cpuid_test.go new file mode 100644 index 00000000000..96d50547158 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_test.go @@ -0,0 +1,737 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +package cpuid + +import ( + "fmt" + "testing" +) + +// There is no real way to test a CPU identifier, since results will +// obviously differ on each machine. 
+func TestCPUID(t *testing.T) { + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("Name:", CPU.BrandName) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + + if CPU.SSE2() { + t.Log("We have SSE2") + } +} + +func TestDumpCPUID(t *testing.T) { + n := int(maxFunctionID()) + for i := 0; i <= n; i++ { + a, b, c, d := cpuidex(uint32(i), 0) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + ex := uint32(1) + for { + a2, b2, c2, d2 := cpuidex(uint32(i), ex) + if a2 == a && b2 == b && d2 == d || ex > 50 || a2 == 0 { + break + } + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a2, b2, c2, d2) + a, b, c, d = a2, b2, c2, d2 + ex++ + } + } + n2 := maxExtendedFunction() + for i := uint32(0x80000000); i <= n2; i++ { + a, b, c, d := cpuid(i) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + } +} + +func Example() { + // Print basic CPU information: + fmt.Println("Name:", CPU.BrandName) + fmt.Println("PhysicalCores:", CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", CPU.LogicalCores) + fmt.Println("Family", CPU.Family, "Model:", CPU.Model) + fmt.Println("Features:", CPU.Features) + fmt.Println("Cacheline bytes:", CPU.CacheLine) + + // Test if we have a specific feature: + if CPU.SSE() { + fmt.Println("We have Streaming SIMD Extensions") + } +} + +func TestBrandNameZero(t *testing.T) { + if len(CPU.BrandName) > 0 { + // Cut out last byte + last := []byte(CPU.BrandName[len(CPU.BrandName)-1:]) + if last[0] == 0 { + t.Fatal("last byte was zero") + } else if last[0] == 32 { + t.Fatal("whitespace wasn't trimmed") + } + } +} + +// Generated here: http://play.golang.org/p/mko-0tFt0Q + +// TestCmov tests Cmov() function +func TestCmov(t *testing.T) { + got := CPU.Cmov() + expected := CPU.Features&CMOV == CMOV + if got != expected { + t.Fatalf("Cmov: expected %v, got %v", expected, got) + } + t.Log("CMOV Support:", got) +} + +// TestAmd3dnow tests Amd3dnow() function +func TestAmd3dnow(t *testing.T) { + got := CPU.Amd3dnow() + expected := CPU.Features&AMD3DNOW == AMD3DNOW + if got != expected { + t.Fatalf("Amd3dnow: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOW Support:", got) +} + +// TestAmd3dnowExt tests Amd3dnowExt() function +func TestAmd3dnowExt(t *testing.T) { + got := CPU.Amd3dnowExt() + expected := CPU.Features&AMD3DNOWEXT == AMD3DNOWEXT + if got != expected { + t.Fatalf("Amd3dnowExt: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOWEXT Support:", got) +} + +// TestMMX tests MMX() function +func TestMMX(t *testing.T) { + got := CPU.MMX() + expected := CPU.Features&MMX == MMX + if got != expected { + t.Fatalf("MMX: expected %v, got %v", expected, got) + } + t.Log("MMX Support:", got) +} + +// TestMMXext tests MMXext() function +func TestMMXext(t *testing.T) { + got := CPU.MMXExt() + expected := CPU.Features&MMXEXT == MMXEXT + if got != expected { + t.Fatalf("MMXExt: expected %v, got %v", expected, got) + } + t.Log("MMXEXT Support:", got) +} + +// TestSSE tests SSE() function +func TestSSE(t *testing.T) { + got := CPU.SSE() + 
expected := CPU.Features&SSE == SSE + if got != expected { + t.Fatalf("SSE: expected %v, got %v", expected, got) + } + t.Log("SSE Support:", got) +} + +// TestSSE2 tests SSE2() function +func TestSSE2(t *testing.T) { + got := CPU.SSE2() + expected := CPU.Features&SSE2 == SSE2 + if got != expected { + t.Fatalf("SSE2: expected %v, got %v", expected, got) + } + t.Log("SSE2 Support:", got) +} + +// TestSSE3 tests SSE3() function +func TestSSE3(t *testing.T) { + got := CPU.SSE3() + expected := CPU.Features&SSE3 == SSE3 + if got != expected { + t.Fatalf("SSE3: expected %v, got %v", expected, got) + } + t.Log("SSE3 Support:", got) +} + +// TestSSSE3 tests SSSE3() function +func TestSSSE3(t *testing.T) { + got := CPU.SSSE3() + expected := CPU.Features&SSSE3 == SSSE3 + if got != expected { + t.Fatalf("SSSE3: expected %v, got %v", expected, got) + } + t.Log("SSSE3 Support:", got) +} + +// TestSSE4 tests SSE4() function +func TestSSE4(t *testing.T) { + got := CPU.SSE4() + expected := CPU.Features&SSE4 == SSE4 + if got != expected { + t.Fatalf("SSE4: expected %v, got %v", expected, got) + } + t.Log("SSE4 Support:", got) +} + +// TestSSE42 tests SSE42() function +func TestSSE42(t *testing.T) { + got := CPU.SSE42() + expected := CPU.Features&SSE42 == SSE42 + if got != expected { + t.Fatalf("SSE42: expected %v, got %v", expected, got) + } + t.Log("SSE42 Support:", got) +} + +// TestAVX tests AVX() function +func TestAVX(t *testing.T) { + got := CPU.AVX() + expected := CPU.Features&AVX == AVX + if got != expected { + t.Fatalf("AVX: expected %v, got %v", expected, got) + } + t.Log("AVX Support:", got) +} + +// TestAVX2 tests AVX2() function +func TestAVX2(t *testing.T) { + got := CPU.AVX2() + expected := CPU.Features&AVX2 == AVX2 + if got != expected { + t.Fatalf("AVX2: expected %v, got %v", expected, got) + } + t.Log("AVX2 Support:", got) +} + +// TestFMA3 tests FMA3() function +func TestFMA3(t *testing.T) { + got := CPU.FMA3() + expected := CPU.Features&FMA3 == FMA3 + if got != expected { + t.Fatalf("FMA3: expected %v, got %v", expected, got) + } + t.Log("FMA3 Support:", got) +} + +// TestFMA4 tests FMA4() function +func TestFMA4(t *testing.T) { + got := CPU.FMA4() + expected := CPU.Features&FMA4 == FMA4 + if got != expected { + t.Fatalf("FMA4: expected %v, got %v", expected, got) + } + t.Log("FMA4 Support:", got) +} + +// TestXOP tests XOP() function +func TestXOP(t *testing.T) { + got := CPU.XOP() + expected := CPU.Features&XOP == XOP + if got != expected { + t.Fatalf("XOP: expected %v, got %v", expected, got) + } + t.Log("XOP Support:", got) +} + +// TestF16C tests F16C() function +func TestF16C(t *testing.T) { + got := CPU.F16C() + expected := CPU.Features&F16C == F16C + if got != expected { + t.Fatalf("F16C: expected %v, got %v", expected, got) + } + t.Log("F16C Support:", got) +} + +// TestCX16 tests CX16() function +func TestCX16(t *testing.T) { + got := CPU.CX16() + expected := CPU.Features&CX16 == CX16 + if got != expected { + t.Fatalf("CX16: expected %v, got %v", expected, got) + } + t.Log("CX16 Support:", got) +} + +// TestSGX tests SGX() function +func TestSGX(t *testing.T) { + got := CPU.SGX.Available + expected := CPU.Features&SGX == SGX + if got != expected { + t.Fatalf("SGX: expected %v, got %v", expected, got) + } + t.Log("SGX Support:", got) +} + +// TestBMI1 tests BMI1() function +func TestBMI1(t *testing.T) { + got := CPU.BMI1() + expected := CPU.Features&BMI1 == BMI1 + if got != expected { + t.Fatalf("BMI1: expected %v, got %v", expected, got) + } + t.Log("BMI1 Support:", got) +} + 
+// TestBMI2 tests BMI2() function +func TestBMI2(t *testing.T) { + got := CPU.BMI2() + expected := CPU.Features&BMI2 == BMI2 + if got != expected { + t.Fatalf("BMI2: expected %v, got %v", expected, got) + } + t.Log("BMI2 Support:", got) +} + +// TestTBM tests TBM() function +func TestTBM(t *testing.T) { + got := CPU.TBM() + expected := CPU.Features&TBM == TBM + if got != expected { + t.Fatalf("TBM: expected %v, got %v", expected, got) + } + t.Log("TBM Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestLzcnt(t *testing.T) { + got := CPU.Lzcnt() + expected := CPU.Features&LZCNT == LZCNT + if got != expected { + t.Fatalf("Lzcnt: expected %v, got %v", expected, got) + } + t.Log("LZCNT Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestPopcnt(t *testing.T) { + got := CPU.Popcnt() + expected := CPU.Features&POPCNT == POPCNT + if got != expected { + t.Fatalf("Popcnt: expected %v, got %v", expected, got) + } + t.Log("POPCNT Support:", got) +} + +// TestAesNi tests AesNi() function +func TestAesNi(t *testing.T) { + got := CPU.AesNi() + expected := CPU.Features&AESNI == AESNI + if got != expected { + t.Fatalf("AesNi: expected %v, got %v", expected, got) + } + t.Log("AESNI Support:", got) +} + +// TestHTT tests HTT() function +func TestHTT(t *testing.T) { + got := CPU.HTT() + expected := CPU.Features&HTT == HTT + if got != expected { + t.Fatalf("HTT: expected %v, got %v", expected, got) + } + t.Log("HTT Support:", got) +} + +// TestClmul tests Clmul() function +func TestClmul(t *testing.T) { + got := CPU.Clmul() + expected := CPU.Features&CLMUL == CLMUL + if got != expected { + t.Fatalf("Clmul: expected %v, got %v", expected, got) + } + t.Log("CLMUL Support:", got) +} + +// TestSSE2Slow tests SSE2Slow() function +func TestSSE2Slow(t *testing.T) { + got := CPU.SSE2Slow() + expected := CPU.Features&SSE2SLOW == SSE2SLOW + if got != expected { + t.Fatalf("SSE2Slow: expected %v, got %v", expected, got) + } + t.Log("SSE2SLOW Support:", got) +} + +// TestSSE3Slow tests SSE3slow() function +func TestSSE3Slow(t *testing.T) { + got := CPU.SSE3Slow() + expected := CPU.Features&SSE3SLOW == SSE3SLOW + if got != expected { + t.Fatalf("SSE3slow: expected %v, got %v", expected, got) + } + t.Log("SSE3SLOW Support:", got) +} + +// TestAtom tests Atom() function +func TestAtom(t *testing.T) { + got := CPU.Atom() + expected := CPU.Features&ATOM == ATOM + if got != expected { + t.Fatalf("Atom: expected %v, got %v", expected, got) + } + t.Log("ATOM Support:", got) +} + +// TestNX tests NX() function (NX (No-Execute) bit) +func TestNX(t *testing.T) { + got := CPU.NX() + expected := CPU.Features&NX == NX + if got != expected { + t.Fatalf("NX: expected %v, got %v", expected, got) + } + t.Log("NX Support:", got) +} + +// TestSSE4A tests SSE4A() function (AMD Barcelona microarchitecture SSE4a instructions) +func TestSSE4A(t *testing.T) { + got := CPU.SSE4A() + expected := CPU.Features&SSE4A == SSE4A + if got != expected { + t.Fatalf("SSE4A: expected %v, got %v", expected, got) + } + t.Log("SSE4A Support:", got) +} + +// TestHLE tests HLE() function (Hardware Lock Elision) +func TestHLE(t *testing.T) { + got := CPU.HLE() + expected := CPU.Features&HLE == HLE + if got != expected { + t.Fatalf("HLE: expected %v, got %v", expected, got) + } + t.Log("HLE Support:", got) +} + +// TestRTM tests RTM() function (Restricted Transactional Memory) +func TestRTM(t *testing.T) { + got := CPU.RTM() + expected := CPU.Features&RTM == RTM + if got != expected { + t.Fatalf("RTM: expected %v, got %v", expected, 
got) + } + t.Log("RTM Support:", got) +} + +// TestRdrand tests RDRAND() function (RDRAND instruction is available) +func TestRdrand(t *testing.T) { + got := CPU.Rdrand() + expected := CPU.Features&RDRAND == RDRAND + if got != expected { + t.Fatalf("Rdrand: expected %v, got %v", expected, got) + } + t.Log("Rdrand Support:", got) +} + +// TestRdseed tests RDSEED() function (RDSEED instruction is available) +func TestRdseed(t *testing.T) { + got := CPU.Rdseed() + expected := CPU.Features&RDSEED == RDSEED + if got != expected { + t.Fatalf("Rdseed: expected %v, got %v", expected, got) + } + t.Log("Rdseed Support:", got) +} + +// TestADX tests ADX() function (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +func TestADX(t *testing.T) { + got := CPU.ADX() + expected := CPU.Features&ADX == ADX + if got != expected { + t.Fatalf("ADX: expected %v, got %v", expected, got) + } + t.Log("ADX Support:", got) +} + +// TestSHA tests SHA() function (Intel SHA Extensions) +func TestSHA(t *testing.T) { + got := CPU.SHA() + expected := CPU.Features&SHA == SHA + if got != expected { + t.Fatalf("SHA: expected %v, got %v", expected, got) + } + t.Log("SHA Support:", got) +} + +// TestAVX512F tests AVX512F() function (AVX-512 Foundation) +func TestAVX512F(t *testing.T) { + got := CPU.AVX512F() + expected := CPU.Features&AVX512F == AVX512F + if got != expected { + t.Fatalf("AVX512F: expected %v, got %v", expected, got) + } + t.Log("AVX512F Support:", got) +} + +// TestAVX512DQ tests AVX512DQ() function (AVX-512 Doubleword and Quadword Instructions) +func TestAVX512DQ(t *testing.T) { + got := CPU.AVX512DQ() + expected := CPU.Features&AVX512DQ == AVX512DQ + if got != expected { + t.Fatalf("AVX512DQ: expected %v, got %v", expected, got) + } + t.Log("AVX512DQ Support:", got) +} + +// TestAVX512IFMA tests AVX512IFMA() function (AVX-512 Integer Fused Multiply-Add Instructions) +func TestAVX512IFMA(t *testing.T) { + got := CPU.AVX512IFMA() + expected := CPU.Features&AVX512IFMA == AVX512IFMA + if got != expected { + t.Fatalf("AVX512IFMA: expected %v, got %v", expected, got) + } + t.Log("AVX512IFMA Support:", got) +} + +// TestAVX512PF tests AVX512PF() function (AVX-512 Prefetch Instructions) +func TestAVX512PF(t *testing.T) { + got := CPU.AVX512PF() + expected := CPU.Features&AVX512PF == AVX512PF + if got != expected { + t.Fatalf("AVX512PF: expected %v, got %v", expected, got) + } + t.Log("AVX512PF Support:", got) +} + +// TestAVX512ER tests AVX512ER() function (AVX-512 Exponential and Reciprocal Instructions) +func TestAVX512ER(t *testing.T) { + got := CPU.AVX512ER() + expected := CPU.Features&AVX512ER == AVX512ER + if got != expected { + t.Fatalf("AVX512ER: expected %v, got %v", expected, got) + } + t.Log("AVX512ER Support:", got) +} + +// TestAVX512CD tests AVX512CD() function (AVX-512 Conflict Detection Instructions) +func TestAVX512CD(t *testing.T) { + got := CPU.AVX512CD() + expected := CPU.Features&AVX512CD == AVX512CD + if got != expected { + t.Fatalf("AVX512CD: expected %v, got %v", expected, got) + } + t.Log("AVX512CD Support:", got) +} + +// TestAVX512BW tests AVX512BW() function (AVX-512 Byte and Word Instructions) +func TestAVX512BW(t *testing.T) { + got := CPU.AVX512BW() + expected := CPU.Features&AVX512BW == AVX512BW + if got != expected { + t.Fatalf("AVX512BW: expected %v, got %v", expected, got) + } + t.Log("AVX512BW Support:", got) +} + +// TestAVX512VL tests AVX512VL() function (AVX-512 Vector Length Extensions) +func TestAVX512VL(t *testing.T) { + got := CPU.AVX512VL() + expected := 
CPU.Features&AVX512VL == AVX512VL + if got != expected { + t.Fatalf("AVX512VL: expected %v, got %v", expected, got) + } + t.Log("AVX512VL Support:", got) +} + +// TestAVX512VL tests AVX512VBMI() function (AVX-512 Vector Bit Manipulation Instructions) +func TestAVX512VBMI(t *testing.T) { + got := CPU.AVX512VBMI() + expected := CPU.Features&AVX512VBMI == AVX512VBMI + if got != expected { + t.Fatalf("AVX512VBMI: expected %v, got %v", expected, got) + } + t.Log("AVX512VBMI Support:", got) +} + +// TestMPX tests MPX() function (Intel MPX (Memory Protection Extensions)) +func TestMPX(t *testing.T) { + got := CPU.MPX() + expected := CPU.Features&MPX == MPX + if got != expected { + t.Fatalf("MPX: expected %v, got %v", expected, got) + } + t.Log("MPX Support:", got) +} + +// TestERMS tests ERMS() function (Enhanced REP MOVSB/STOSB) +func TestERMS(t *testing.T) { + got := CPU.ERMS() + expected := CPU.Features&ERMS == ERMS + if got != expected { + t.Fatalf("ERMS: expected %v, got %v", expected, got) + } + t.Log("ERMS Support:", got) +} + +// TestVendor writes the detected vendor. Will be 0 if unknown +func TestVendor(t *testing.T) { + t.Log("Vendor ID:", CPU.VendorID) +} + +// Intel returns true if vendor is recognized as Intel +func TestIntel(t *testing.T) { + got := CPU.Intel() + expected := CPU.VendorID == Intel + if got != expected { + t.Fatalf("TestIntel: expected %v, got %v", expected, got) + } + t.Log("TestIntel:", got) +} + +// AMD returns true if vendor is recognized as AMD +func TestAMD(t *testing.T) { + got := CPU.AMD() + expected := CPU.VendorID == AMD + if got != expected { + t.Fatalf("TestAMD: expected %v, got %v", expected, got) + } + t.Log("TestAMD:", got) +} + +// Transmeta returns true if vendor is recognized as Transmeta +func TestTransmeta(t *testing.T) { + got := CPU.Transmeta() + expected := CPU.VendorID == Transmeta + if got != expected { + t.Fatalf("TestTransmeta: expected %v, got %v", expected, got) + } + t.Log("TestTransmeta:", got) +} + +// NSC returns true if vendor is recognized as National Semiconductor +func TestNSC(t *testing.T) { + got := CPU.NSC() + expected := CPU.VendorID == NSC + if got != expected { + t.Fatalf("TestNSC: expected %v, got %v", expected, got) + } + t.Log("TestNSC:", got) +} + +// VIA returns true if vendor is recognized as VIA +func TestVIA(t *testing.T) { + got := CPU.VIA() + expected := CPU.VendorID == VIA + if got != expected { + t.Fatalf("TestVIA: expected %v, got %v", expected, got) + } + t.Log("TestVIA:", got) +} + +// Test VM function +func TestVM(t *testing.T) { + t.Log("Vendor ID:", CPU.VM()) +} + +// TSX returns true if cpu supports transactional sync extensions. 
+func TestCPUInfo_TSX(t *testing.T) { + got := CPU.TSX() + expected := CPU.HLE() && CPU.RTM() + if got != expected { + t.Fatalf("TestCPUInfo_TSX: expected %v, got %v", expected, got) + } + t.Log("TestCPUInfo_TSX:", got) +} + +// Test RTCounter function +func TestRtCounter(t *testing.T) { + a := CPU.RTCounter() + b := CPU.RTCounter() + t.Log("CPU Counter:", a, b, b-a) +} + +// Prints the value of Ia32TscAux() +func TestIa32TscAux(t *testing.T) { + ecx := CPU.Ia32TscAux() + t.Logf("Ia32TscAux:0x%x\n", ecx) + if ecx != 0 { + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + t.Log("Likely chip, core:", chip, core) + } +} + +func TestThreadsPerCoreNZ(t *testing.T) { + if CPU.ThreadsPerCore == 0 { + t.Fatal("threads per core is zero") + } +} + +// Prints the value of LogicalCPU() +func TestLogicalCPU(t *testing.T) { + t.Log("Currently executing on cpu:", CPU.LogicalCPU()) +} + +func TestMaxFunction(t *testing.T) { + expect := maxFunctionID() + if CPU.maxFunc != expect { + t.Fatal("Max function does not match, expected", expect, "but got", CPU.maxFunc) + } + expect = maxExtendedFunction() + if CPU.maxExFunc != expect { + t.Fatal("Max Extended function does not match, expected", expect, "but got", CPU.maxFunc) + } +} + +// This example will calculate the chip/core number on Linux +// Linux encodes numa id (<<12) and core id (8bit) into TSC_AUX. +func ExampleCPUInfo_Ia32TscAux() { + ecx := CPU.Ia32TscAux() + if ecx == 0 { + fmt.Println("Unknown CPU ID") + return + } + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + fmt.Println("Chip, Core:", chip, core) +} + +/* +func TestPhysical(t *testing.T) { + var test16 = "CPUID 00000000: 0000000d-756e6547-6c65746e-49656e69 \nCPUID 00000001: 000206d7-03200800-1fbee3ff-bfebfbff \nCPUID 00000002: 76035a01-00f0b2ff-00000000-00ca0000 \nCPUID 00000003: 00000000-00000000-00000000-00000000 \nCPUID 00000004: 3c004121-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004122-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004143-01c0003f-000001ff-00000000 \nCPUID 00000004: 3c07c163-04c0003f-00003fff-00000006 \nCPUID 00000005: 00000040-00000040-00000003-00021120 \nCPUID 00000006: 00000075-00000002-00000009-00000000 \nCPUID 00000007: 00000000-00000000-00000000-00000000 \nCPUID 00000008: 00000000-00000000-00000000-00000000 \nCPUID 00000009: 00000001-00000000-00000000-00000000 \nCPUID 0000000a: 07300403-00000000-00000000-00000603 \nCPUID 0000000b: 00000000-00000000-00000003-00000003 \nCPUID 0000000b: 00000005-00000010-00000201-00000003 \nCPUID 0000000c: 00000000-00000000-00000000-00000000 \nCPUID 0000000d: 00000007-00000340-00000340-00000000 \nCPUID 0000000d: 00000001-00000000-00000000-00000000 \nCPUID 0000000d: 00000100-00000240-00000000-00000000 \nCPUID 80000000: 80000008-00000000-00000000-00000000 \nCPUID 80000001: 00000000-00000000-00000001-2c100800 \nCPUID 80000002: 20202020-49202020-6c65746e-20295228 \nCPUID 80000003: 6e6f6558-20295228-20555043-322d3545 \nCPUID 80000004: 20303636-20402030-30322e32-007a4847 \nCPUID 80000005: 00000000-00000000-00000000-00000000 \nCPUID 80000006: 00000000-00000000-01006040-00000000 \nCPUID 80000007: 00000000-00000000-00000000-00000100 \nCPUID 80000008: 0000302e-00000000-00000000-00000000" + restore := mockCPU([]byte(test16)) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + 
t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + Detect() + TestCPUID(t) +} +*/ diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go new file mode 100644 index 00000000000..a5f04dd6d0a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo amd64,!gccgo + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go new file mode 100644 index 00000000000..909c5d9a7ae --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// +build !amd64,!386 gccgo + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go new file mode 100644 index 00000000000..90e7a98d278 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/generate.go @@ -0,0 +1,4 @@ +package cpuid + +//go:generate go run private-gen.go +//go:generate gofmt -w ./private diff --git a/vendor/github.com/klauspost/cpuid/mockcpu_test.go b/vendor/github.com/klauspost/cpuid/mockcpu_test.go new file mode 100644 index 00000000000..f15173f7381 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/mockcpu_test.go @@ -0,0 +1,209 @@ +package cpuid + +import ( + "archive/zip" + "fmt" + "io/ioutil" + "sort" + "strings" + "testing" +) + +type fakecpuid map[uint32][][]uint32 + +type idfuncs struct { + cpuid func(op uint32) (eax, ebx, ecx, edx uint32) + cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) + xgetbv func(index uint32) (eax, edx uint32) +} + +func (f fakecpuid) String() string { + var out = make([]string, 0, len(f)) + for key, val := range f { + for _, v := range val { + out = append(out, fmt.Sprintf("CPUID %08x: [%08x, %08x, %08x, %08x]", key, v[0], v[1], v[2], v[3])) + } + } + sorter := sort.StringSlice(out) + sort.Sort(&sorter) + return strings.Join(sorter, "\n") +} + +func mockCPU(def []byte) func() { + lines := strings.Split(string(def), "\n") + anyfound := false + fakeID := make(fakecpuid) + for _, line := range lines { + line = strings.Trim(line, "\r\t ") + if !strings.HasPrefix(line, "CPUID") { + continue + } + // Only collect for first cpu + if strings.HasPrefix(line, "CPUID 00000000") { + if anyfound { + break + } + } + if !strings.Contains(line, "-") { + //continue + } + items := strings.Split(line, ":") + if len(items) < 2 { + if len(line) == 51 || len(line) == 50 { + items = []string{line[0:14], line[15:]} + } else { + items = strings.Split(line, "\t") + if len(items) != 2 { + //fmt.Println("not found:", line, "len:", len(line)) + continue + } + } + } + items = items[0:2] + vals := strings.Trim(items[1], "\r\n ") + + var idV uint32 + n, err := fmt.Sscanf(items[0], "CPUID %x", &idV) + if err != nil || n != 1 { + continue + } + existing, ok := fakeID[idV] + if !ok { + existing = make([][]uint32, 0) + } + + values := make([]uint32, 4) + n, err = fmt.Sscanf(vals, "%x-%x-%x-%x", &values[0], &values[1], &values[2], &values[3]) + if n != 4 || err != nil { + n, err = fmt.Sscanf(vals, "%x %x %x %x", &values[0], &values[1], &values[2], &values[3]) + if n != 4 || err != nil { + //fmt.Println("scanned", vals, "got", n, "Err:", err) + continue + } + } + + existing = append(existing, values) + fakeID[idV] = existing + anyfound = true + } + + restorer := func(f idfuncs) func() { + return func() { + cpuid = f.cpuid + cpuidex = f.cpuidex + xgetbv = f.xgetbv + } + }(idfuncs{cpuid: cpuid, cpuidex: cpuidex, xgetbv: xgetbv}) + + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + if op == 0x80000000 || op == 0 { + var ok bool + _, ok = fakeID[op] + if !ok { + return 0, 0, 0, 0 + } + } + first, ok := fakeID[op] + if !ok { + if op > maxFunctionID() { + panic(fmt.Sprintf("Base not found: %v, request:%#v\n", fakeID, op)) + } else { + // we have some entries 
missing + return 0, 0, 0, 0 + } + } + theid := first[0] + return theid[0], theid[1], theid[2], theid[3] + } + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + if op == 0x80000000 { + var ok bool + _, ok = fakeID[op] + if !ok { + return 0, 0, 0, 0 + } + } + first, ok := fakeID[op] + if !ok { + if op > maxExtendedFunction() { + panic(fmt.Sprintf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2)) + } else { + // we have some entries missing + return 0, 0, 0, 0 + } + } + if int(op2) >= len(first) { + //fmt.Printf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2) + return 0, 0, 0, 0 + } + theid := first[op2] + return theid[0], theid[1], theid[2], theid[3] + } + xgetbv = func(index uint32) (eax, edx uint32) { + first, ok := fakeID[1] + if !ok { + panic(fmt.Sprintf("XGETBV not supported %v", fakeID)) + } + second := first[0] + // ECX bit 26 must be set + if (second[2] & 1 << 26) == 0 { + panic(fmt.Sprintf("XGETBV not supported %v", fakeID)) + } + // We don't have any data to return, unfortunately + return 0, 0 + } + return restorer +} + +func TestMocks(t *testing.T) { + zr, err := zip.OpenReader("testdata/cpuid_data.zip") + if err != nil { + t.Skip("No testdata:", err) + } + defer zr.Close() + for _, f := range zr.File { + rc, err := f.Open() + if err != nil { + t.Fatal(err) + } + content, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal(err) + } + rc.Close() + t.Log("Opening", f.FileInfo().Name()) + restore := mockCPU(content) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + } + Detect() + +} diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go new file mode 100644 index 00000000000..437333d2922 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private-gen.go @@ -0,0 +1,476 @@ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var inFiles = []string{"cpuid.go", "cpuid_test.go"} +var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} +var fileSet = token.NewFileSet() +var reWrites = []rewrite{ + initRewrite("CPUInfo -> cpuInfo"), + initRewrite("Vendor -> vendor"), + initRewrite("Flags -> flags"), + initRewrite("Detect -> detect"), + initRewrite("CPU -> cpu"), +} +var excludeNames = map[string]bool{"string": true, 
"join": true, "trim": true, + // cpuid_test.go + "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, +} + +var excludePrefixes = []string{"test", "benchmark"} + +func main() { + Package := "private" + parserMode := parser.ParseComments + exported := make(map[string]rewrite) + for _, file := range inFiles { + in, err := os.Open(file) + if err != nil { + log.Fatalf("opening input", err) + } + + src, err := ioutil.ReadAll(in) + if err != nil { + log.Fatalf("reading input", err) + } + + astfile, err := parser.ParseFile(fileSet, file, src, parserMode) + if err != nil { + log.Fatalf("parsing input", err) + } + + for _, rw := range reWrites { + astfile = rw(astfile) + } + + // Inspect the AST and print all identifiers and literals. + var startDecl token.Pos + var endDecl token.Pos + ast.Inspect(astfile, func(n ast.Node) bool { + var s string + switch x := n.(type) { + case *ast.Ident: + if x.IsExported() { + t := strings.ToLower(x.Name) + for _, pre := range excludePrefixes { + if strings.HasPrefix(t, pre) { + return true + } + } + if excludeNames[t] != true { + //if x.Pos() > startDecl && x.Pos() < endDecl { + exported[x.Name] = initRewrite(x.Name + " -> " + t) + } + } + + case *ast.GenDecl: + if x.Tok == token.CONST && x.Lparen > 0 { + startDecl = x.Lparen + endDecl = x.Rparen + // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) + } + } + if s != "" { + fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) + } + return true + }) + + for _, rw := range exported { + astfile = rw(astfile) + } + + var buf bytes.Buffer + + printer.Fprint(&buf, fileSet, astfile) + + // Remove package documentation and insert information + s := buf.String() + ind := strings.Index(buf.String(), "\npackage cpuid") + s = s[ind:] + s = "// Generated, DO NOT EDIT,\n" + + "// but copy it to your own project and rename the package.\n" + + "// See more at http://github.com/klauspost/cpuid\n" + + s + + outputName := Package + string(os.PathSeparator) + file + + err = ioutil.WriteFile(outputName, []byte(s), 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } + log.Println("Generated", outputName) + } + + for _, file := range copyFiles { + dst := "" + if strings.HasPrefix(file, "cpuid") { + dst = Package + string(os.PathSeparator) + file + } else { + dst = Package + string(os.PathSeparator) + "cpuid_" + file + } + err := copyFile(file, dst) + if err != nil { + log.Fatalf("copying file: %s", err) + } + log.Println("Copied", dst) + } +} + +// CopyFile copies a file from src to dst. If src and dst files exist, and are +// the same, then return success. Copy the file contents from src to dst. +func copyFile(src, dst string) (err error) { + sfi, err := os.Stat(src) + if err != nil { + return + } + if !sfi.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) + } + dfi, err := os.Stat(dst) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(dfi.Mode().IsRegular()) { + return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) + } + if os.SameFile(sfi, dfi) { + return + } + } + err = copyFileContents(src, dst) + return +} + +// copyFileContents copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. 
If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. +func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +type rewrite func(*ast.File) *ast.File + +// Mostly copied from gofmt +func initRewrite(rewriteRule string) rewrite { + f := strings.Split(rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. +func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. +func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + for k := range m { + delete(m, k) + } + val = apply(rewriteVal, val) + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. +var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. +// To avoid extra conversions, f operates on the reflect.Value form. 
+func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match returns true if pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. + if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. + if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. + switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Position) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. + p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. + return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. 
+// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. + if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. + switch p := pattern; p.Kind() { + case reflect.Slice: + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Ptr: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/vendor/github.com/klauspost/cpuid/private/README.md b/vendor/github.com/klauspost/cpuid/private/README.md new file mode 100644 index 00000000000..57a68f88274 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/README.md @@ -0,0 +1,6 @@ +# cpuid private + +This is a specially converted of the cpuid package, so it can be included in +a package without exporting anything. + +Package home: https://github.com/klauspost/cpuid diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid.go b/vendor/github.com/klauspost/cpuid/private/cpuid.go new file mode 100644 index 00000000000..b7995156c61 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid.go @@ -0,0 +1,1034 @@ +// Generated, DO NOT EDIT, +// but copy it to your own project and rename the package. +// See more at http://github.com/klauspost/cpuid + +package cpuid + +import "strings" + +// Vendor is a representation of a CPU vendor. 
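The private copy that follows mirrors the public package: each feature is a distinct bit generated with 1 << iota, and every accessor is a mask test against the detected feature set. A self-contained sketch of the same pattern, for illustration only (the names here are invented and are not part of the vendored file):

package main

import "fmt"

type flags uint64

const (
    featSSE2 flags = 1 << iota // bit 0
    featAVX                    // bit 1
    featAVX2                   // bit 2
)

func main() {
    detected := featSSE2 | featAVX // pretend detection found these two
    fmt.Println("AVX support: ", detected&featAVX != 0)  // true
    fmt.Println("AVX2 support:", detected&featAVX2 != 0) // false
}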
+type vendor int + +const ( + other vendor = iota + intel + amd + via + transmeta + nsc + kvm // Kernel-based Virtual Machine + msvm // Microsoft Hyper-V or Windows Virtual PC + vmware + xenhvm +) + +const ( + cmov = 1 << iota // i686 CMOV + nx // NX (No-Execute) bit + amd3dnow // AMD 3DNOW + amd3dnowext // AMD 3DNowExt + mmx // standard MMX + mmxext // SSE integer functions or AMD MMX ext + sse // SSE functions + sse2 // P4 SSE functions + sse3 // Prescott SSE3 functions + ssse3 // Conroe SSSE3 functions + sse4 // Penryn SSE4.1 functions + sse4a // AMD Barcelona microarchitecture SSE4a instructions + sse42 // Nehalem SSE4.2 functions + avx // AVX functions + avx2 // AVX2 functions + fma3 // Intel FMA 3 + fma4 // Bulldozer FMA4 functions + xop // Bulldozer XOP functions + f16c // Half-precision floating-point conversion + bmi1 // Bit Manipulation Instruction Set 1 + bmi2 // Bit Manipulation Instruction Set 2 + tbm // AMD Trailing Bit Manipulation + lzcnt // LZCNT instruction + popcnt // POPCNT instruction + aesni // Advanced Encryption Standard New Instructions + clmul // Carry-less Multiplication + htt // Hyperthreading (enabled) + hle // Hardware Lock Elision + rtm // Restricted Transactional Memory + rdrand // RDRAND instruction is available + rdseed // RDSEED instruction is available + adx // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + sha // Intel SHA Extensions + avx512f // AVX-512 Foundation + avx512dq // AVX-512 Doubleword and Quadword Instructions + avx512ifma // AVX-512 Integer Fused Multiply-Add Instructions + avx512pf // AVX-512 Prefetch Instructions + avx512er // AVX-512 Exponential and Reciprocal Instructions + avx512cd // AVX-512 Conflict Detection Instructions + avx512bw // AVX-512 Byte and Word Instructions + avx512vl // AVX-512 Vector Length Extensions + avx512vbmi // AVX-512 Vector Bit Manipulation Instructions + mpx // Intel MPX (Memory Protection Extensions) + erms // Enhanced REP MOVSB/STOSB + rdtscp // RDTSCP Instruction + cx16 // CMPXCHG16B Instruction + sgx // Software Guard Extensions + ibpb // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + stibp // Single Thread Indirect Branch Predictors + + // Performance indicators + sse2slow // SSE2 is supported, but usually not faster + sse3slow // SSE3 is supported, but usually not faster + atom // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[flags]string{ + cmov: "CMOV", // i686 CMOV + nx: "NX", // NX (No-Execute) bit + amd3dnow: "AMD3DNOW", // AMD 3DNOW + amd3dnowext: "AMD3DNOWEXT", // AMD 3DNowExt + mmx: "MMX", // Standard MMX + mmxext: "MMXEXT", // SSE integer functions or AMD MMX ext + sse: "SSE", // SSE functions + sse2: "SSE2", // P4 SSE2 functions + sse3: "SSE3", // Prescott SSE3 functions + ssse3: "SSSE3", // Conroe SSSE3 functions + sse4: "SSE4.1", // Penryn SSE4.1 functions + sse4a: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + sse42: "SSE4.2", // Nehalem SSE4.2 functions + avx: "AVX", // AVX functions + avx2: "AVX2", // AVX functions + fma3: "FMA3", // Intel FMA 3 + fma4: "FMA4", // Bulldozer FMA4 functions + xop: "XOP", // Bulldozer XOP functions + f16c: "F16C", // Half-precision floating-point conversion + bmi1: "BMI1", // Bit Manipulation Instruction Set 1 + bmi2: "BMI2", // Bit Manipulation Instruction Set 2 + tbm: "TBM", // AMD Trailing Bit Manipulation + lzcnt: "LZCNT", // LZCNT instruction + popcnt: "POPCNT", // POPCNT instruction + aesni: "AESNI", // Advanced Encryption Standard New 
Instructions + clmul: "CLMUL", // Carry-less Multiplication + htt: "HTT", // Hyperthreading (enabled) + hle: "HLE", // Hardware Lock Elision + rtm: "RTM", // Restricted Transactional Memory + rdrand: "RDRAND", // RDRAND instruction is available + rdseed: "RDSEED", // RDSEED instruction is available + adx: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + sha: "SHA", // Intel SHA Extensions + avx512f: "AVX512F", // AVX-512 Foundation + avx512dq: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + avx512ifma: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions + avx512pf: "AVX512PF", // AVX-512 Prefetch Instructions + avx512er: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + avx512cd: "AVX512CD", // AVX-512 Conflict Detection Instructions + avx512bw: "AVX512BW", // AVX-512 Byte and Word Instructions + avx512vl: "AVX512VL", // AVX-512 Vector Length Extensions + avx512vbmi: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + mpx: "MPX", // Intel MPX (Memory Protection Extensions) + erms: "ERMS", // Enhanced REP MOVSB/STOSB + rdtscp: "RDTSCP", // RDTSCP Instruction + cx16: "CX16", // CMPXCHG16B Instruction + sgx: "SGX", // Software Guard Extensions + ibpb: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier + stibp: "STIBP", // Single Thread Indirect Branch Predictors + + // Performance indicators + sse2slow: "SSE2SLOW", // SSE2 supported, but usually not faster + sse3slow: "SSE3SLOW", // SSE3 supported, but usually not faster + atom: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type cpuInfo struct { + brandname string // Brand name reported by the CPU + vendorid vendor // Comparable CPU vendor ID + features flags // Features of the CPU + physicalcores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + threadspercore int // Number of threads per physical core. Will be 1 if undetectable. + logicalcores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + family int // CPU family number + model int // CPU model number + cacheline int // Cache line size in bytes. Will be 0 if undetectable. + cache struct { + l1i int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + l1d int // L1 Data Cache (per core or shared). Will be -1 if undetected + l2 int // L2 Cache (per core or shared). Will be -1 if undetected + l3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected + } + sgx sgxsupport + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var cpu cpuInfo + +func init() { + initCPU() + detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. 
+func detect() { + cpu.maxFunc = maxFunctionID() + cpu.maxExFunc = maxExtendedFunction() + cpu.brandname = brandName() + cpu.cacheline = cacheLine() + cpu.family, cpu.model = familyModel() + cpu.features = support() + cpu.sgx = hasSGX(cpu.features&sgx != 0) + cpu.threadspercore = threadsPerCore() + cpu.logicalcores = logicalCores() + cpu.physicalcores = physicalCores() + cpu.vendorid = vendorID() + cpu.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c cpuInfo) cmov() bool { + return c.features&cmov != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! instructions +func (c cpuInfo) amd3dnow() bool { + return c.features&amd3dnow != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions +func (c cpuInfo) amd3dnowext() bool { + return c.features&amd3dnowext != 0 +} + +// MMX indicates support of MMX instructions +func (c cpuInfo) mmx() bool { + return c.features&mmx != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c cpuInfo) mmxext() bool { + return c.features&mmxext != 0 +} + +// SSE indicates support of SSE instructions +func (c cpuInfo) sse() bool { + return c.features&sse != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c cpuInfo) sse2() bool { + return c.features&sse2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c cpuInfo) sse3() bool { + return c.features&sse3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c cpuInfo) ssse3() bool { + return c.features&ssse3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c cpuInfo) sse4() bool { + return c.features&sse4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c cpuInfo) sse42() bool { + return c.features&sse42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c cpuInfo) avx() bool { + return c.features&avx != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c cpuInfo) avx2() bool { + return c.features&avx2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c cpuInfo) fma3() bool { + return c.features&fma3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c cpuInfo) fma4() bool { + return c.features&fma4 != 0 +} + +// XOP indicates support of XOP instructions +func (c cpuInfo) xop() bool { + return c.features&xop != 0 +} + +// F16C indicates support of F16C instructions +func (c cpuInfo) f16c() bool { + return c.features&f16c != 0 +} + +// BMI1 indicates support of BMI1 instructions +func (c cpuInfo) bmi1() bool { + return c.features&bmi1 != 0 +} + +// BMI2 indicates support of BMI2 instructions +func (c cpuInfo) bmi2() bool { + return c.features&bmi2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c cpuInfo) tbm() bool { + return c.features&tbm != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c cpuInfo) lzcnt() bool { + return c.features&lzcnt != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c cpuInfo) popcnt() bool { + return c.features&popcnt != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c cpuInfo) htt() bool { + return c.features&htt != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c cpuInfo) sse2slow() bool { + return c.features&sse2slow != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor 
+func (c cpuInfo) sse3slow() bool { + return c.features&sse3slow != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c cpuInfo) aesni() bool { + return c.features&aesni != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c cpuInfo) clmul() bool { + return c.features&clmul != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c cpuInfo) nx() bool { + return c.features&nx != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c cpuInfo) sse4a() bool { + return c.features&sse4a != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c cpuInfo) hle() bool { + return c.features&hle != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c cpuInfo) rtm() bool { + return c.features&rtm != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c cpuInfo) rdrand() bool { + return c.features&rdrand != 0 +} + +// Rdseed indicates support of RDSEED instruction is available +func (c cpuInfo) rdseed() bool { + return c.features&rdseed != 0 +} + +// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) +func (c cpuInfo) adx() bool { + return c.features&adx != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c cpuInfo) sha() bool { + return c.features&sha != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c cpuInfo) avx512f() bool { + return c.features&avx512f != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c cpuInfo) avx512dq() bool { + return c.features&avx512dq != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c cpuInfo) avx512ifma() bool { + return c.features&avx512ifma != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c cpuInfo) avx512pf() bool { + return c.features&avx512pf != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c cpuInfo) avx512er() bool { + return c.features&avx512er != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c cpuInfo) avx512cd() bool { + return c.features&avx512cd != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c cpuInfo) avx512bw() bool { + return c.features&avx512bw != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c cpuInfo) avx512vl() bool { + return c.features&avx512vl != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c cpuInfo) avx512vbmi() bool { + return c.features&avx512vbmi != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c cpuInfo) mpx() bool { + return c.features&mpx != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c cpuInfo) erms() bool { + return c.features&erms != 0 +} + +// RDTSCP Instruction is available. +func (c cpuInfo) rdtscp() bool { + return c.features&rdtscp != 0 +} + +// CX16 indicates if CMPXCHG16B instruction is available. +func (c cpuInfo) cx16() bool { + return c.features&cx16 != 0 +} + +// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. +// So TSX simply checks that. 
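Per the comment above and the public test earlier in this diff (TestCPUInfo_TSX expects CPU.HLE() && CPU.RTM()), the TSX result combines the HLE and RTM bits; the generated body that follows masks against mpx|rtm instead. For comparison only, a sketch of the check the comment describes (hypothetical method name, assumes the surrounding package's identifiers, not part of the vendored file):

// tsxHLEandRTM reports Transactional Synchronization Extensions support:
// both Hardware Lock Elision and Restricted Transactional Memory.
func (c cpuInfo) tsxHLEandRTM() bool {
    return c.features&(hle|rtm) == hle|rtm
}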
+func (c cpuInfo) tsx() bool { + return c.features&(mpx|rtm) == mpx|rtm +} + +// Atom indicates an Atom processor +func (c cpuInfo) atom() bool { + return c.features&atom != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c cpuInfo) intel() bool { + return c.vendorid == intel +} + +// AMD returns true if vendor is recognized as AMD +func (c cpuInfo) amd() bool { + return c.vendorid == amd +} + +// Transmeta returns true if vendor is recognized as Transmeta +func (c cpuInfo) transmeta() bool { + return c.vendorid == transmeta +} + +// NSC returns true if vendor is recognized as National Semiconductor +func (c cpuInfo) nsc() bool { + return c.vendorid == nsc +} + +// VIA returns true if vendor is recognized as VIA +func (c cpuInfo) via() bool { + return c.vendorid == via +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c cpuInfo) rtcounter() uint64 { + if !c.rdtscp() { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c cpuInfo) ia32tscaux() uint32 { + if !c.rdtscp() { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c cpuInfo) logicalcpu() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. This is only a hint, and will very likely +// have many false negatives. +func (c cpuInfo) vm() bool { + switch c.vendorid { + case msvm, kvm, vmware, xenhvm: + return true + } + return false +} + +// Flags contains detected cpu features and caracteristics +type flags uint64 + +// String returns a string representation of the detected +// CPU features. +func (f flags) String() string { + return strings.Join(f.strings(), ",") +} + +// Strings returns and array of the detected features. 
+func (f flags) strings() []string { + s := support() + r := make([]string, 0, 20) + for i := uint(0); i < 64; i++ { + key := flags(1 << i) + val := flagNames[key] + if s&key != 0 { + r = append(r, val) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + if mfi < 0x4 || vendorID() != intel { + return 1 + } + + if mfi < 0xb { + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + switch vendorID() { + case intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case amd: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (int, int) { + if maxFunctionID() < 0x1 { + return 0, 0 + } + eax, _, _, _ := cpuid(1) + family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) + model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) + return int(family), int(model) +} + +func physicalCores() int { + switch vendorID() { + case intel: + return logicalCores() / threadsPerCore() + case amd: + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + return int(c&0xff) + 1 + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]vendor{ + "AMDisbetter!": amd, + "AuthenticAMD": amd, + "CentaurHauls": via, + "GenuineIntel": intel, + "TransmetaCPU": transmeta, + "GenuineTMx86": transmeta, + "Geode by NSC": nsc, + "VIA VIA VIA ": via, + "KVMKVMKVMKVM": kvm, + "Microsoft Hv": msvm, + "VMwareVMware": vmware, + "XenVMMXenVMM": xenhvm, +} + +func vendorID() vendor { + _, b, c, d := cpuid(0) + v := valAsString(b, d, c) + vend, ok := vendorMapping[string(v)] + if !ok { + return other + } + return vend +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *cpuInfo) cacheSize() { + c.cache.l1d = -1 + c.cache.l1i = -1 + c.cache.l2 = -1 + c.cache.l3 = -1 + vendor := vendorID() + switch vendor { + case intel: + if maxFunctionID() < 4 { + return + } + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := 
cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.cache.l1d = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.cache.l1i = size + } else { + if c.cache.l1d < 0 { + c.cache.l1i = size + } + if c.cache.l1i < 0 { + c.cache.l1i = size + } + } + case 2: + c.cache.l2 = size + case 3: + c.cache.l3 = size + } + } + case amd: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.cache.l1d = int(((ecx >> 24) & 0xFF) * 1024) + c.cache.l1i = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.cache.l2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +type sgxsupport struct { + available bool + sgx1supported bool + sgx2supported bool + maxenclavesizenot64 int64 + maxenclavesize64 int64 +} + +func hasSGX(available bool) (rval sgxsupport) { + rval.available = available + + if !available { + return + } + + a, _, _, d := cpuidex(0x12, 0) + rval.sgx1supported = a&0x01 != 0 + rval.sgx2supported = a&0x02 != 0 + rval.maxenclavesizenot64 = 1 << (d & 0xFF) // pow 2 + rval.maxenclavesize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + + return +} + +func support() flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= cmov + } + if (d & (1 << 23)) != 0 { + rval |= mmx + } + if (d & (1 << 25)) != 0 { + rval |= mmxext + } + if (d & (1 << 25)) != 0 { + rval |= sse + } + if (d & (1 << 26)) != 0 { + rval |= sse2 + } + if (c & 1) != 0 { + rval |= sse3 + } + if (c & 0x00000200) != 0 { + rval |= ssse3 + } + if (c & 0x00080000) != 0 { + rval |= sse4 + } + if (c & 0x00100000) != 0 { + rval |= sse42 + } + if (c & (1 << 25)) != 0 { + rval |= aesni + } + if (c & (1 << 1)) != 0 { + rval |= clmul + } + if c&(1<<23) != 0 { + rval |= popcnt + } + if c&(1<<30) != 0 { + rval |= rdrand + } + if c&(1<<29) != 0 { + rval |= f16c + } + if c&(1<<13) != 0 { + rval |= cx16 + } + if vend == intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + rval |= htt + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= avx + if (c & 0x00001000) != 0 { + rval |= fma3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
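// Illustrative note (not part of the vendored file): the block just above gates
// AVX on operating-system support. CPUID.1:ECX bits 26 (XSAVE), 27 (OSXSAVE)
// and 28 (AVX) only say the CPU has the feature; XGETBV(0) then reads XCR0, and
// the 0x6 mask requires both bit 1 (SSE/XMM state) and bit 2 (AVX/YMM state) to
// be enabled by the OS. For example, XCR0 = 0x7 gives 0x7 & 0x6 == 0x6, so the
// avx flag is set, while XCR0 = 0x3 leaves AVX masked off even though CPUID
// advertises it. The AVX-512 branch further below applies the same idea with
// XCR0[7:5] (opmask and ZMM state) and XCR0[2:1].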
+ if mfi >= 7 { + _, ebx, ecx, edx := cpuidex(7, 0) + if (rval&avx) != 0 && (ebx&0x00000020) != 0 { + rval |= avx2 + } + if (ebx & 0x00000008) != 0 { + rval |= bmi1 + if (ebx & 0x00000100) != 0 { + rval |= bmi2 + } + } + if ebx&(1<<2) != 0 { + rval |= sgx + } + if ebx&(1<<4) != 0 { + rval |= hle + } + if ebx&(1<<9) != 0 { + rval |= erms + } + if ebx&(1<<11) != 0 { + rval |= rtm + } + if ebx&(1<<14) != 0 { + rval |= mpx + } + if ebx&(1<<18) != 0 { + rval |= rdseed + } + if ebx&(1<<19) != 0 { + rval |= adx + } + if ebx&(1<<29) != 0 { + rval |= sha + } + if edx&(1<<26) != 0 { + rval |= ibpb + } + if edx&(1<<27) != 0 { + rval |= stibp + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= avx512f + } + if ebx&(1<<17) != 0 { + rval |= avx512dq + } + if ebx&(1<<21) != 0 { + rval |= avx512ifma + } + if ebx&(1<<26) != 0 { + rval |= avx512pf + } + if ebx&(1<<27) != 0 { + rval |= avx512er + } + if ebx&(1<<28) != 0 { + rval |= avx512cd + } + if ebx&(1<<30) != 0 { + rval |= avx512bw + } + if ebx&(1<<31) != 0 { + rval |= avx512vl + } + // ecx + if ecx&(1<<1) != 0 { + rval |= avx512vbmi + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= lzcnt + rval |= popcnt + } + if (d & (1 << 31)) != 0 { + rval |= amd3dnow + } + if (d & (1 << 30)) != 0 { + rval |= amd3dnowext + } + if (d & (1 << 23)) != 0 { + rval |= mmx + } + if (d & (1 << 22)) != 0 { + rval |= mmxext + } + if (c & (1 << 6)) != 0 { + rval |= sse4a + } + if d&(1<<20) != 0 { + rval |= nx + } + if d&(1<<27) != 0 { + rval |= rdtscp + } + + /* Allow for selectively disabling SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != intel && + rval&sse2 != 0 && (c&0x00000040) == 0 { + rval |= sse2slow + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if (rval & avx) != 0 { + if (c & 0x00000800) != 0 { + rval |= xop + } + if (c & 0x00010000) != 0 { + rval |= fma4 + } + } + + if vendorID() == intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & sse2) != 0 { + rval |= sse2slow + } + if (rval & sse3) != 0 { + rval |= sse3slow + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. 
*/ + if family == 6 && model == 28 { + rval |= atom + } + } + } + return flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_386.s b/vendor/github.com/klauspost/cpuid/private/cpuid_386.s new file mode 100644 index 00000000000..4d731711e48 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_386.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/private/cpuid_amd64.s new file mode 100644 index 00000000000..3c1d60e4221 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_amd64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_detect_intel.go b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_intel.go new file mode 100644 index 00000000000..a5f04dd6d0a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// +build 386,!gccgo amd64,!gccgo + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_detect_ref.go b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_ref.go new file mode 100644 index 00000000000..909c5d9a7ae --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build !amd64,!386 gccgo + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_test.go b/vendor/github.com/klauspost/cpuid/private/cpuid_test.go new file mode 100644 index 00000000000..31b7f3feb82 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_test.go @@ -0,0 +1,739 @@ +// Generated, DO NOT EDIT, +// but copy it to your own project and rename the package. +// See more at http://github.com/klauspost/cpuid + +package cpuid + +import ( + "fmt" + "testing" +) + +// There is no real way to test a CPU identifier, since results will +// obviously differ on each machine. +func TestCPUID(t *testing.T) { + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("Name:", cpu.brandname) + t.Log("PhysicalCores:", cpu.physicalcores) + t.Log("ThreadsPerCore:", cpu.threadspercore) + t.Log("LogicalCores:", cpu.logicalcores) + t.Log("Family", cpu.family, "Model:", cpu.model) + t.Log("Features:", cpu.features) + t.Log("Cacheline bytes:", cpu.cacheline) + t.Log("L1 Instruction Cache:", cpu.cache.l1i, "bytes") + t.Log("L1 Data Cache:", cpu.cache.l1d, "bytes") + t.Log("L2 Cache:", cpu.cache.l2, "bytes") + t.Log("L3 Cache:", cpu.cache.l3, "bytes") + + if cpu.sse2() { + t.Log("We have SSE2") + } +} + +func TestDumpCPUID(t *testing.T) { + n := int(maxFunctionID()) + for i := 0; i <= n; i++ { + a, b, c, d := cpuidex(uint32(i), 0) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + ex := uint32(1) + for { + a2, b2, c2, d2 := cpuidex(uint32(i), ex) + if a2 == a && b2 == b && d2 == d || ex > 50 || a2 == 0 { + break + } + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a2, b2, c2, d2) + a, b, c, d = a2, b2, c2, d2 + ex++ + } + } + n2 := maxExtendedFunction() + for i := uint32(0x80000000); i <= n2; i++ { + a, b, c, d := cpuid(i) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + } +} + +func example() { + // Print basic CPU information: + fmt.Println("Name:", cpu.brandname) + fmt.Println("PhysicalCores:", cpu.physicalcores) + fmt.Println("ThreadsPerCore:", cpu.threadspercore) + fmt.Println("LogicalCores:", cpu.logicalcores) + fmt.Println("Family", cpu.family, "Model:", cpu.model) + fmt.Println("Features:", cpu.features) + fmt.Println("Cacheline bytes:", cpu.cacheline) + + // Test if we have a specific feature: + if cpu.sse() { + fmt.Println("We have Streaming SIMD Extensions") + } +} + +func 
TestBrandNameZero(t *testing.T) { + if len(cpu.brandname) > 0 { + // Cut out last byte + last := []byte(cpu.brandname[len(cpu.brandname)-1:]) + if last[0] == 0 { + t.Fatal("last byte was zero") + } else if last[0] == 32 { + t.Fatal("whitespace wasn't trimmed") + } + } +} + +// Generated here: http://play.golang.org/p/mko-0tFt0Q + +// TestCmov tests Cmov() function +func TestCmov(t *testing.T) { + got := cpu.cmov() + expected := cpu.features&cmov == cmov + if got != expected { + t.Fatalf("Cmov: expected %v, got %v", expected, got) + } + t.Log("CMOV Support:", got) +} + +// TestAmd3dnow tests Amd3dnow() function +func TestAmd3dnow(t *testing.T) { + got := cpu.amd3dnow() + expected := cpu.features&amd3dnow == amd3dnow + if got != expected { + t.Fatalf("Amd3dnow: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOW Support:", got) +} + +// TestAmd3dnowExt tests Amd3dnowExt() function +func TestAmd3dnowExt(t *testing.T) { + got := cpu.amd3dnowext() + expected := cpu.features&amd3dnowext == amd3dnowext + if got != expected { + t.Fatalf("Amd3dnowExt: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOWEXT Support:", got) +} + +// TestMMX tests MMX() function +func TestMMX(t *testing.T) { + got := cpu.mmx() + expected := cpu.features&mmx == mmx + if got != expected { + t.Fatalf("MMX: expected %v, got %v", expected, got) + } + t.Log("MMX Support:", got) +} + +// TestMMXext tests MMXext() function +func TestMMXext(t *testing.T) { + got := cpu.mmxext() + expected := cpu.features&mmxext == mmxext + if got != expected { + t.Fatalf("MMXExt: expected %v, got %v", expected, got) + } + t.Log("MMXEXT Support:", got) +} + +// TestSSE tests SSE() function +func TestSSE(t *testing.T) { + got := cpu.sse() + expected := cpu.features&sse == sse + if got != expected { + t.Fatalf("SSE: expected %v, got %v", expected, got) + } + t.Log("SSE Support:", got) +} + +// TestSSE2 tests SSE2() function +func TestSSE2(t *testing.T) { + got := cpu.sse2() + expected := cpu.features&sse2 == sse2 + if got != expected { + t.Fatalf("SSE2: expected %v, got %v", expected, got) + } + t.Log("SSE2 Support:", got) +} + +// TestSSE3 tests SSE3() function +func TestSSE3(t *testing.T) { + got := cpu.sse3() + expected := cpu.features&sse3 == sse3 + if got != expected { + t.Fatalf("SSE3: expected %v, got %v", expected, got) + } + t.Log("SSE3 Support:", got) +} + +// TestSSSE3 tests SSSE3() function +func TestSSSE3(t *testing.T) { + got := cpu.ssse3() + expected := cpu.features&ssse3 == ssse3 + if got != expected { + t.Fatalf("SSSE3: expected %v, got %v", expected, got) + } + t.Log("SSSE3 Support:", got) +} + +// TestSSE4 tests SSE4() function +func TestSSE4(t *testing.T) { + got := cpu.sse4() + expected := cpu.features&sse4 == sse4 + if got != expected { + t.Fatalf("SSE4: expected %v, got %v", expected, got) + } + t.Log("SSE4 Support:", got) +} + +// TestSSE42 tests SSE42() function +func TestSSE42(t *testing.T) { + got := cpu.sse42() + expected := cpu.features&sse42 == sse42 + if got != expected { + t.Fatalf("SSE42: expected %v, got %v", expected, got) + } + t.Log("SSE42 Support:", got) +} + +// TestAVX tests AVX() function +func TestAVX(t *testing.T) { + got := cpu.avx() + expected := cpu.features&avx == avx + if got != expected { + t.Fatalf("AVX: expected %v, got %v", expected, got) + } + t.Log("AVX Support:", got) +} + +// TestAVX2 tests AVX2() function +func TestAVX2(t *testing.T) { + got := cpu.avx2() + expected := cpu.features&avx2 == avx2 + if got != expected { + t.Fatalf("AVX2: expected %v, got %v", expected, got) + } + 
t.Log("AVX2 Support:", got) +} + +// TestFMA3 tests FMA3() function +func TestFMA3(t *testing.T) { + got := cpu.fma3() + expected := cpu.features&fma3 == fma3 + if got != expected { + t.Fatalf("FMA3: expected %v, got %v", expected, got) + } + t.Log("FMA3 Support:", got) +} + +// TestFMA4 tests FMA4() function +func TestFMA4(t *testing.T) { + got := cpu.fma4() + expected := cpu.features&fma4 == fma4 + if got != expected { + t.Fatalf("FMA4: expected %v, got %v", expected, got) + } + t.Log("FMA4 Support:", got) +} + +// TestXOP tests XOP() function +func TestXOP(t *testing.T) { + got := cpu.xop() + expected := cpu.features&xop == xop + if got != expected { + t.Fatalf("XOP: expected %v, got %v", expected, got) + } + t.Log("XOP Support:", got) +} + +// TestF16C tests F16C() function +func TestF16C(t *testing.T) { + got := cpu.f16c() + expected := cpu.features&f16c == f16c + if got != expected { + t.Fatalf("F16C: expected %v, got %v", expected, got) + } + t.Log("F16C Support:", got) +} + +// TestCX16 tests CX16() function +func TestCX16(t *testing.T) { + got := cpu.cx16() + expected := cpu.features&cx16 == cx16 + if got != expected { + t.Fatalf("CX16: expected %v, got %v", expected, got) + } + t.Log("CX16 Support:", got) +} + +// TestSGX tests SGX() function +func TestSGX(t *testing.T) { + got := cpu.sgx.available + expected := cpu.features&sgx == sgx + if got != expected { + t.Fatalf("SGX: expected %v, got %v", expected, got) + } + t.Log("SGX Support:", got) +} + +// TestBMI1 tests BMI1() function +func TestBMI1(t *testing.T) { + got := cpu.bmi1() + expected := cpu.features&bmi1 == bmi1 + if got != expected { + t.Fatalf("BMI1: expected %v, got %v", expected, got) + } + t.Log("BMI1 Support:", got) +} + +// TestBMI2 tests BMI2() function +func TestBMI2(t *testing.T) { + got := cpu.bmi2() + expected := cpu.features&bmi2 == bmi2 + if got != expected { + t.Fatalf("BMI2: expected %v, got %v", expected, got) + } + t.Log("BMI2 Support:", got) +} + +// TestTBM tests TBM() function +func TestTBM(t *testing.T) { + got := cpu.tbm() + expected := cpu.features&tbm == tbm + if got != expected { + t.Fatalf("TBM: expected %v, got %v", expected, got) + } + t.Log("TBM Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestLzcnt(t *testing.T) { + got := cpu.lzcnt() + expected := cpu.features&lzcnt == lzcnt + if got != expected { + t.Fatalf("Lzcnt: expected %v, got %v", expected, got) + } + t.Log("LZCNT Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestPopcnt(t *testing.T) { + got := cpu.popcnt() + expected := cpu.features&popcnt == popcnt + if got != expected { + t.Fatalf("Popcnt: expected %v, got %v", expected, got) + } + t.Log("POPCNT Support:", got) +} + +// TestAesNi tests AesNi() function +func TestAesNi(t *testing.T) { + got := cpu.aesni() + expected := cpu.features&aesni == aesni + if got != expected { + t.Fatalf("AesNi: expected %v, got %v", expected, got) + } + t.Log("AESNI Support:", got) +} + +// TestHTT tests HTT() function +func TestHTT(t *testing.T) { + got := cpu.htt() + expected := cpu.features&htt == htt + if got != expected { + t.Fatalf("HTT: expected %v, got %v", expected, got) + } + t.Log("HTT Support:", got) +} + +// TestClmul tests Clmul() function +func TestClmul(t *testing.T) { + got := cpu.clmul() + expected := cpu.features&clmul == clmul + if got != expected { + t.Fatalf("Clmul: expected %v, got %v", expected, got) + } + t.Log("CLMUL Support:", got) +} + +// TestSSE2Slow tests SSE2Slow() function +func TestSSE2Slow(t *testing.T) { + got := cpu.sse2slow() + 
expected := cpu.features&sse2slow == sse2slow + if got != expected { + t.Fatalf("SSE2Slow: expected %v, got %v", expected, got) + } + t.Log("SSE2SLOW Support:", got) +} + +// TestSSE3Slow tests SSE3slow() function +func TestSSE3Slow(t *testing.T) { + got := cpu.sse3slow() + expected := cpu.features&sse3slow == sse3slow + if got != expected { + t.Fatalf("SSE3slow: expected %v, got %v", expected, got) + } + t.Log("SSE3SLOW Support:", got) +} + +// TestAtom tests Atom() function +func TestAtom(t *testing.T) { + got := cpu.atom() + expected := cpu.features&atom == atom + if got != expected { + t.Fatalf("Atom: expected %v, got %v", expected, got) + } + t.Log("ATOM Support:", got) +} + +// TestNX tests NX() function (NX (No-Execute) bit) +func TestNX(t *testing.T) { + got := cpu.nx() + expected := cpu.features&nx == nx + if got != expected { + t.Fatalf("NX: expected %v, got %v", expected, got) + } + t.Log("NX Support:", got) +} + +// TestSSE4A tests SSE4A() function (AMD Barcelona microarchitecture SSE4a instructions) +func TestSSE4A(t *testing.T) { + got := cpu.sse4a() + expected := cpu.features&sse4a == sse4a + if got != expected { + t.Fatalf("SSE4A: expected %v, got %v", expected, got) + } + t.Log("SSE4A Support:", got) +} + +// TestHLE tests HLE() function (Hardware Lock Elision) +func TestHLE(t *testing.T) { + got := cpu.hle() + expected := cpu.features&hle == hle + if got != expected { + t.Fatalf("HLE: expected %v, got %v", expected, got) + } + t.Log("HLE Support:", got) +} + +// TestRTM tests RTM() function (Restricted Transactional Memory) +func TestRTM(t *testing.T) { + got := cpu.rtm() + expected := cpu.features&rtm == rtm + if got != expected { + t.Fatalf("RTM: expected %v, got %v", expected, got) + } + t.Log("RTM Support:", got) +} + +// TestRdrand tests RDRAND() function (RDRAND instruction is available) +func TestRdrand(t *testing.T) { + got := cpu.rdrand() + expected := cpu.features&rdrand == rdrand + if got != expected { + t.Fatalf("Rdrand: expected %v, got %v", expected, got) + } + t.Log("Rdrand Support:", got) +} + +// TestRdseed tests RDSEED() function (RDSEED instruction is available) +func TestRdseed(t *testing.T) { + got := cpu.rdseed() + expected := cpu.features&rdseed == rdseed + if got != expected { + t.Fatalf("Rdseed: expected %v, got %v", expected, got) + } + t.Log("Rdseed Support:", got) +} + +// TestADX tests ADX() function (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +func TestADX(t *testing.T) { + got := cpu.adx() + expected := cpu.features&adx == adx + if got != expected { + t.Fatalf("ADX: expected %v, got %v", expected, got) + } + t.Log("ADX Support:", got) +} + +// TestSHA tests SHA() function (Intel SHA Extensions) +func TestSHA(t *testing.T) { + got := cpu.sha() + expected := cpu.features&sha == sha + if got != expected { + t.Fatalf("SHA: expected %v, got %v", expected, got) + } + t.Log("SHA Support:", got) +} + +// TestAVX512F tests AVX512F() function (AVX-512 Foundation) +func TestAVX512F(t *testing.T) { + got := cpu.avx512f() + expected := cpu.features&avx512f == avx512f + if got != expected { + t.Fatalf("AVX512F: expected %v, got %v", expected, got) + } + t.Log("AVX512F Support:", got) +} + +// TestAVX512DQ tests AVX512DQ() function (AVX-512 Doubleword and Quadword Instructions) +func TestAVX512DQ(t *testing.T) { + got := cpu.avx512dq() + expected := cpu.features&avx512dq == avx512dq + if got != expected { + t.Fatalf("AVX512DQ: expected %v, got %v", expected, got) + } + t.Log("AVX512DQ Support:", got) +} + +// TestAVX512IFMA tests 
AVX512IFMA() function (AVX-512 Integer Fused Multiply-Add Instructions) +func TestAVX512IFMA(t *testing.T) { + got := cpu.avx512ifma() + expected := cpu.features&avx512ifma == avx512ifma + if got != expected { + t.Fatalf("AVX512IFMA: expected %v, got %v", expected, got) + } + t.Log("AVX512IFMA Support:", got) +} + +// TestAVX512PF tests AVX512PF() function (AVX-512 Prefetch Instructions) +func TestAVX512PF(t *testing.T) { + got := cpu.avx512pf() + expected := cpu.features&avx512pf == avx512pf + if got != expected { + t.Fatalf("AVX512PF: expected %v, got %v", expected, got) + } + t.Log("AVX512PF Support:", got) +} + +// TestAVX512ER tests AVX512ER() function (AVX-512 Exponential and Reciprocal Instructions) +func TestAVX512ER(t *testing.T) { + got := cpu.avx512er() + expected := cpu.features&avx512er == avx512er + if got != expected { + t.Fatalf("AVX512ER: expected %v, got %v", expected, got) + } + t.Log("AVX512ER Support:", got) +} + +// TestAVX512CD tests AVX512CD() function (AVX-512 Conflict Detection Instructions) +func TestAVX512CD(t *testing.T) { + got := cpu.avx512cd() + expected := cpu.features&avx512cd == avx512cd + if got != expected { + t.Fatalf("AVX512CD: expected %v, got %v", expected, got) + } + t.Log("AVX512CD Support:", got) +} + +// TestAVX512BW tests AVX512BW() function (AVX-512 Byte and Word Instructions) +func TestAVX512BW(t *testing.T) { + got := cpu.avx512bw() + expected := cpu.features&avx512bw == avx512bw + if got != expected { + t.Fatalf("AVX512BW: expected %v, got %v", expected, got) + } + t.Log("AVX512BW Support:", got) +} + +// TestAVX512VL tests AVX512VL() function (AVX-512 Vector Length Extensions) +func TestAVX512VL(t *testing.T) { + got := cpu.avx512vl() + expected := cpu.features&avx512vl == avx512vl + if got != expected { + t.Fatalf("AVX512VL: expected %v, got %v", expected, got) + } + t.Log("AVX512VL Support:", got) +} + +// TestAVX512VL tests AVX512VBMI() function (AVX-512 Vector Bit Manipulation Instructions) +func TestAVX512VBMI(t *testing.T) { + got := cpu.avx512vbmi() + expected := cpu.features&avx512vbmi == avx512vbmi + if got != expected { + t.Fatalf("AVX512VBMI: expected %v, got %v", expected, got) + } + t.Log("AVX512VBMI Support:", got) +} + +// TestMPX tests MPX() function (Intel MPX (Memory Protection Extensions)) +func TestMPX(t *testing.T) { + got := cpu.mpx() + expected := cpu.features&mpx == mpx + if got != expected { + t.Fatalf("MPX: expected %v, got %v", expected, got) + } + t.Log("MPX Support:", got) +} + +// TestERMS tests ERMS() function (Enhanced REP MOVSB/STOSB) +func TestERMS(t *testing.T) { + got := cpu.erms() + expected := cpu.features&erms == erms + if got != expected { + t.Fatalf("ERMS: expected %v, got %v", expected, got) + } + t.Log("ERMS Support:", got) +} + +// TestVendor writes the detected vendor. 
Will be 0 if unknown +func TestVendor(t *testing.T) { + t.Log("Vendor ID:", cpu.vendorid) +} + +// Intel returns true if vendor is recognized as Intel +func TestIntel(t *testing.T) { + got := cpu.intel() + expected := cpu.vendorid == intel + if got != expected { + t.Fatalf("TestIntel: expected %v, got %v", expected, got) + } + t.Log("TestIntel:", got) +} + +// AMD returns true if vendor is recognized as AMD +func TestAMD(t *testing.T) { + got := cpu.amd() + expected := cpu.vendorid == amd + if got != expected { + t.Fatalf("TestAMD: expected %v, got %v", expected, got) + } + t.Log("TestAMD:", got) +} + +// Transmeta returns true if vendor is recognized as Transmeta +func TestTransmeta(t *testing.T) { + got := cpu.transmeta() + expected := cpu.vendorid == transmeta + if got != expected { + t.Fatalf("TestTransmeta: expected %v, got %v", expected, got) + } + t.Log("TestTransmeta:", got) +} + +// NSC returns true if vendor is recognized as National Semiconductor +func TestNSC(t *testing.T) { + got := cpu.nsc() + expected := cpu.vendorid == nsc + if got != expected { + t.Fatalf("TestNSC: expected %v, got %v", expected, got) + } + t.Log("TestNSC:", got) +} + +// VIA returns true if vendor is recognized as VIA +func TestVIA(t *testing.T) { + got := cpu.via() + expected := cpu.vendorid == via + if got != expected { + t.Fatalf("TestVIA: expected %v, got %v", expected, got) + } + t.Log("TestVIA:", got) +} + +// Test VM function +func TestVM(t *testing.T) { + t.Log("Vendor ID:", cpu.vm()) +} + +// NSC returns true if vendor is recognized as National Semiconductor +func TestCPUInfo_TSX(t *testing.T) { + got := cpu.tsx() + expected := cpu.hle() && cpu.rtm() + if got != expected { + t.Fatalf("TestNSC: expected %v, got %v", expected, got) + } + t.Log("TestNSC:", got) +} + +// Test RTCounter function +func TestRtCounter(t *testing.T) { + a := cpu.rtcounter() + b := cpu.rtcounter() + t.Log("CPU Counter:", a, b, b-a) +} + +// Prints the value of Ia32TscAux() +func TestIa32TscAux(t *testing.T) { + ecx := cpu.ia32tscaux() + t.Logf("Ia32TscAux:0x%x\n", ecx) + if ecx != 0 { + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + t.Log("Likely chip, core:", chip, core) + } +} + +func TestThreadsPerCoreNZ(t *testing.T) { + if cpu.threadspercore == 0 { + t.Fatal("threads per core is zero") + } +} + +// Prints the value of LogicalCPU() +func TestLogicalCPU(t *testing.T) { + t.Log("Currently executing on cpu:", cpu.logicalcpu()) +} + +func TestMaxFunction(t *testing.T) { + expect := maxFunctionID() + if cpu.maxFunc != expect { + t.Fatal("Max function does not match, expected", expect, "but got", cpu.maxFunc) + } + expect = maxExtendedFunction() + if cpu.maxExFunc != expect { + t.Fatal("Max Extended function does not match, expected", expect, "but got", cpu.maxFunc) + } +} + +// This example will calculate the chip/core number on Linux +// Linux encodes numa id (<<12) and core id (8bit) into TSC_AUX. 
+func examplecpuinfo_ia32tscaux() { + ecx := cpu.ia32tscaux() + if ecx == 0 { + fmt.Println("Unknown CPU ID") + return + } + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + fmt.Println("Chip, Core:", chip, core) +} + +/* +func TestPhysical(t *testing.T) { + var test16 = "CPUID 00000000: 0000000d-756e6547-6c65746e-49656e69 \nCPUID 00000001: 000206d7-03200800-1fbee3ff-bfebfbff \nCPUID 00000002: 76035a01-00f0b2ff-00000000-00ca0000 \nCPUID 00000003: 00000000-00000000-00000000-00000000 \nCPUID 00000004: 3c004121-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004122-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004143-01c0003f-000001ff-00000000 \nCPUID 00000004: 3c07c163-04c0003f-00003fff-00000006 \nCPUID 00000005: 00000040-00000040-00000003-00021120 \nCPUID 00000006: 00000075-00000002-00000009-00000000 \nCPUID 00000007: 00000000-00000000-00000000-00000000 \nCPUID 00000008: 00000000-00000000-00000000-00000000 \nCPUID 00000009: 00000001-00000000-00000000-00000000 \nCPUID 0000000a: 07300403-00000000-00000000-00000603 \nCPUID 0000000b: 00000000-00000000-00000003-00000003 \nCPUID 0000000b: 00000005-00000010-00000201-00000003 \nCPUID 0000000c: 00000000-00000000-00000000-00000000 \nCPUID 0000000d: 00000007-00000340-00000340-00000000 \nCPUID 0000000d: 00000001-00000000-00000000-00000000 \nCPUID 0000000d: 00000100-00000240-00000000-00000000 \nCPUID 80000000: 80000008-00000000-00000000-00000000 \nCPUID 80000001: 00000000-00000000-00000001-2c100800 \nCPUID 80000002: 20202020-49202020-6c65746e-20295228 \nCPUID 80000003: 6e6f6558-20295228-20555043-322d3545 \nCPUID 80000004: 20303636-20402030-30322e32-007a4847 \nCPUID 80000005: 00000000-00000000-00000000-00000000 \nCPUID 80000006: 00000000-00000000-01006040-00000000 \nCPUID 80000007: 00000000-00000000-00000000-00000100 \nCPUID 80000008: 0000302e-00000000-00000000-00000000" + restore := mockCPU([]byte(test16)) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + Detect() + TestCPUID(t) +} +*/ diff --git a/vendor/github.com/klauspost/cpuid/testdata/cpuid_data.zip b/vendor/github.com/klauspost/cpuid/testdata/cpuid_data.zip new file mode 100644 index 00000000000..69432430963 Binary files /dev/null and b/vendor/github.com/klauspost/cpuid/testdata/cpuid_data.zip differ diff --git a/vendor/github.com/klauspost/cpuid/testdata/getall.go b/vendor/github.com/klauspost/cpuid/testdata/getall.go new file mode 100644 index 00000000000..9b51c7aa8d2 --- /dev/null +++ 
b/vendor/github.com/klauspost/cpuid/testdata/getall.go @@ -0,0 +1,77 @@ +package main + +import ( + "archive/zip" + _ "bytes" + "fmt" + "golang.org/x/net/html" + "io" + "net/http" + "os" + "strings" +) + +// Download all CPUID dumps from http://users.atw.hu/instlatx64/ +func main() { + resp, err := http.Get("http://users.atw.hu/instlatx64/?") + if err != nil { + panic(err) + } + + node, err := html.Parse(resp.Body) + if err != nil { + panic(err) + } + + file, err := os.Create("cpuid_data.zip") + if err != nil { + panic(err) + } + defer file.Close() + gw := zip.NewWriter(file) + + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + for _, a := range n.Attr { + if a.Key == "href" { + err := ParseURL(a.Val, gw) + if err != nil { + panic(err) + } + break + } + } + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + + f(node) + err = gw.Close() + if err != nil { + panic(err) + } +} + +func ParseURL(s string, gw *zip.Writer) error { + if strings.Contains(s, "CPUID.txt") { + fmt.Println("Adding", "http://users.atw.hu/instlatx64/"+s) + resp, err := http.Get("http://users.atw.hu/instlatx64/" + s) + if err != nil { + fmt.Println("Error getting ", s, ":", err) + } + defer resp.Body.Close() + w, err := gw.Create(s) + if err != nil { + return err + } + + _, err = io.Copy(w, resp.Body) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/klauspost/pgzip/.gitignore b/vendor/github.com/klauspost/pgzip/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/pgzip/.travis.yml b/vendor/github.com/klauspost/pgzip/.travis.yml new file mode 100644 index 00000000000..6e9fca0bac3 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/.travis.yml @@ -0,0 +1,21 @@ +language: go + +sudo: false + +os: + - linux + - osx + +go: + - 1.9.x + - 1.10.x + - master + +script: + - go test -v -cpu=1,2,4 . + - go test -v -cpu=2 -race -short . + +matrix: + allow_failures: + - go: 'master' + fast_finish: true diff --git a/vendor/github.com/klauspost/pgzip/GO_LICENSE b/vendor/github.com/klauspost/pgzip/GO_LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/GO_LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE new file mode 100644 index 00000000000..2bdc0d75171 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md new file mode 100644 index 00000000000..81000996c97 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/README.md @@ -0,0 +1,136 @@ +pgzip +===== + +Go parallel gzip compression/decompression. This is a fully gzip compatible drop in replacement for "compress/gzip". + +This will split compression into blocks that are compressed in parallel. +This can be useful for compressing big amounts of data. The output is a standard gzip file. + +The gzip decompression is modified so it decompresses ahead of the current reader. +This means that reads will be non-blocking if the decompressor can keep ahead of your code reading from it. +CRC calculation also takes place in a separate goroutine. + +You should only use this if you are (de)compressing big amounts of data, +say **more than 1MB** at the time, otherwise you will not see any benefit, +and it will likely be faster to use the internal gzip library +or [this package](https://github.com/klauspost/compress). + +It is important to note that this library creates and reads *standard gzip files*. +You do not have to match the compressor/decompressor to get the described speedups, +and the gzip files are fully compatible with other gzip readers/writers. 
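As a rough illustration of the drop-in usage the README above describes, here is a minimal compress-and-decompress sketch; the payload string is illustrative and the error handling is simplified:
```
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	gzip "github.com/klauspost/pgzip"
)

func main() {
	// Compress with the same API surface as compress/gzip.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write([]byte("hello, parallel gzip\n")); err != nil {
		log.Fatal(err)
	}
	// Close flushes the final block and writes the gzip trailer.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress; the reader decodes ahead of the caller in the background.
	r, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```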
+ +A golang variant of this is [bgzf](https://godoc.org/github.com/biogo/hts/bgzf), +which has the same feature, as well as seeking in the resulting file. +The only drawback is a slightly bigger overhead compared to this and pure gzip. +See a comparison below. + +[![GoDoc][1]][2] [![Build Status][3]][4] + +[1]: https://godoc.org/github.com/klauspost/pgzip?status.svg +[2]: https://godoc.org/github.com/klauspost/pgzip +[3]: https://travis-ci.org/klauspost/pgzip.svg +[4]: https://travis-ci.org/klauspost/pgzip + +Installation +==== +```go get github.com/klauspost/pgzip/...``` + +You might need to get/update the dependencies: + +``` +go get -u github.com/klauspost/compress +go get -u github.com/klauspost/crc32 +``` + +Usage +==== +[Godoc Documentation](https://godoc.org/github.com/klauspost/pgzip) + +To use as a replacement for gzip, exchange + +```import "compress/gzip"``` +with +```import gzip "github.com/klauspost/pgzip"```. + +# Changes + +* Oct 6, 2016: Fixed an issue if the destination writer returned an error. +* Oct 6, 2016: Better buffer reuse, should now generate less garbage. +* Oct 6, 2016: Output does not change based on write sizes. +* Dec 8, 2015: Decoder now supports the io.WriterTo interface, giving a speedup and less GC pressure. +* Oct 9, 2015: Reduced allocations by ~35% by using sync.Pool. ~15% overall speedup. + +Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress#changelog) are also carried over, so see that for more changes. + +## Compression +The simplest way to use this is to simply do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip). + +To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. The default is SetConcurrency(250000, 16), meaning blocks are split at 250000 bytes and up to 16 blocks can be processed at once before the writer blocks. + + +Example: +``` +var b bytes.Buffer +w := gzip.NewWriter(&b) +w.SetConcurrency(100000, 10) +w.Write([]byte("hello, world\n")) +w.Close() +``` + +To get any performance gains, you should at least be compressing more than 1 megabyte of data at a time. + +You should at least have a block size of 100k and at least a number of blocks that match the number of cores you would like to utilize, but about twice the number of blocks would be the best. + +Another side effect of this is that it is likely to speed up your other code, since writes to the compressor only block if the compressor is already compressing the number of blocks you have specified. This also means you don't have to worry about buffering input to the compressor. + +## Decompression + +Decompression works similarly to compression. That means that you simply call pgzip the same way as you would call [compress/gzip](http://golang.org/pkg/compress/gzip). + +The only difference is that if you want to specify your own readahead, you have to use `pgzip.NewReaderN(r io.Reader, blockSize, blocks int)` to get a reader with your custom block sizes. The `blockSize` is the size of each block decoded, and `blocks` is the maximum number of blocks that are decoded ahead. + +See [Example on playground](http://play.golang.org/p/uHv1B5NbDh), and the short readahead sketch further below. + +Performance +==== +## Compression + +See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gzipdeflate-benchmarks/).
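As a rough sketch of the readahead tuning that the Decompression section above describes, assuming an illustrative input file name; io.Copy takes the reader's WriteTo path, so the consuming code overlaps with the background decoding:
```
package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"

	gzip "github.com/klauspost/pgzip"
)

func main() {
	f, err := os.Open("large.json.gz") // illustrative input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Decode up to 8 blocks of ~1 MiB each ahead of the consumer.
	r, err := gzip.NewReaderN(f, 1<<20, 8)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	n, err := io.Copy(ioutil.Discard, r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes", n)
}
```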
+ +Compression cost is usually about 0.2% with default settings with a block size of 250k. + +Example with GOMAXPROC set to 8 (quad core with 8 hyperthreads) + +Content is [Matt Mahoneys 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6. + +Compressor | MB/sec | speedup | size | size overhead (lower=better) +------------|----------|---------|------|--------- +[gzip](http://golang.org/pkg/compress/gzip) (golang) | 7.21MB/s | 1.0x | 4786608902 | 0% +[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 10.98MB/s | 1.52x | 4781331645 | -0.11% +[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 50.76MB/s|7.04x | 4784121440 | -0.052% +[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 38.65MB/s | 5.36x | 4924899484 | 2.889% +[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 32.00MB/s | 4.44x | 4791226567 | 0.096% + +pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression) mode, that will allow compression at ~150MB per core per second, independent of the content. + +See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings. + +## Decompression + +The decompression speedup is there because it allows you to do other work while the decompression is taking place. + +In the example above, the numbers are as follows on a 4 CPU machine: + +Decompressor | Time | Speedup +-------------|------|-------- +[gzip](http://golang.org/pkg/compress/gzip) (golang) | 1m28.85s | 0% +[pgzip](https://github.com/klauspost/pgzip) (golang) | 43.48s | 104% + +But wait, since gzip decompression is inherently singlethreaded (aside from CRC calculation) how can it be more than 100% faster? Because pgzip due to its design also acts as a buffer. When using unbuffered gzip, you are also waiting for io when you are decompressing. If the gzip decoder can keep up, it will always have data ready for your reader, and you will not be waiting for input to the gzip decompressor to complete. + +This is pretty much an optimal situation for pgzip, but it reflects most common usecases for CPU intensive gzip usage. + +I haven't included [bgzf](https://godoc.org/github.com/biogo/hts/bgzf) in this comparison, since it only can decompress files created by a compatible encoder, and therefore cannot be considered a generic gzip decompressor. But if you are able to compress your files with a bgzf compatible program, you can expect it to scale beyond 100%. + +# License +This contains large portions of code from the go repository - see GO_LICENSE for more information. The changes are released under MIT License. See LICENSE for more information. diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go new file mode 100644 index 00000000000..93efec71489 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/gunzip.go @@ -0,0 +1,573 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pgzip implements reading and writing of gzip format compressed files, +// as specified in RFC 1952. +// +// This is a drop in replacement for "compress/gzip". +// This will split compression into blocks that are compressed in parallel. +// This can be useful for compressing big amounts of data. 
+// The gzip decompression has not been modified, but remains in the package, +// so you can use it as a complete replacement for "compress/gzip". +// +// See more at https://github.com/klauspost/pgzip +package pgzip + +import ( + "bufio" + "errors" + "hash" + "hash/crc32" + "io" + "sync" + "time" + + "github.com/klauspost/compress/flate" +) + +const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + flagText = 1 << 0 + flagHdrCrc = 1 << 1 + flagExtra = 1 << 2 + flagName = 1 << 3 + flagComment = 1 << 4 +) + +func makeReader(r io.Reader) flate.Reader { + if rr, ok := r.(flate.Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +var ( + // ErrChecksum is returned when reading GZIP data that has an invalid checksum. + ErrChecksum = errors.New("gzip: invalid checksum") + // ErrHeader is returned when reading GZIP data that has an invalid header. + ErrHeader = errors.New("gzip: invalid header") +) + +// The gzip file stores a header giving metadata about the compressed file. +// That header is exposed as the fields of the Writer and Reader structs. +type Header struct { + Comment string // comment + Extra []byte // "extra data" + ModTime time.Time // modification time + Name string // file name + OS byte // operating system type +} + +// A Reader is an io.Reader that can be read to retrieve +// uncompressed data from a gzip-format compressed file. +// +// In general, a gzip file can be a concatenation of gzip files, +// each with its own header. Reads from the Reader +// return the concatenation of the uncompressed data of each. +// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. +type Reader struct { + Header + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + size uint32 + flg byte + buf [512]byte + err error + closeErr chan error + multistream bool + + readAhead chan read + roff int // read offset + current []byte + closeReader chan struct{} + lastBlock bool + blockSize int + blocks int + + activeRA bool // Indication if readahead is active + mu sync.Mutex // Lock for above + + blockPool chan []byte +} + +type read struct { + b []byte + err error +} + +// NewReader creates a new Reader reading the given reader. +// The implementation buffers input and may read more data than necessary from r. +// It is the caller's responsibility to call Close on the Reader when done. +func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + z.blocks = defaultBlocks + z.blockSize = defaultBlockSize + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.multistream = true + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// NewReaderN creates a new Reader reading the given reader. +// The implementation buffers input and may read more data than necessary from r. +// It is the caller's responsibility to call Close on the Reader when done. +// +// With this you can control the approximate size of your blocks, +// as well as how many blocks you want to have prefetched. 
+// +// Default values for this is blockSize = 250000, blocks = 16, +// meaning up to 16 blocks of maximum 250000 bytes will be +// prefetched. +func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) { + z := new(Reader) + z.blocks = blocks + z.blockSize = blockSize + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.multistream = true + + // Account for too small values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + z.killReadAhead() + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.size = 0 + z.err = nil + z.multistream = true + + // Account for uninitialized values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + + if z.blockPool == nil { + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + } + + return z.readHeader(true) +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). +func get4(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func (z *Reader) readString() (string, error) { + var err error + needconv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needconv = true + } + if z.buf[i] == 0 { + // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). 
+ if needconv { + s := make([]rune, 0, i) + for _, v := range z.buf[0:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[0:i]), nil + } + } +} + +func (z *Reader) read2() (uint32, error) { + _, err := io.ReadFull(z.r, z.buf[0:2]) + if err != nil { + return 0, err + } + return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil +} + +func (z *Reader) readHeader(save bool) error { + z.killReadAhead() + + _, err := io.ReadFull(z.r, z.buf[0:10]) + if err != nil { + return err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return ErrHeader + } + z.flg = z.buf[3] + if save { + z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0) + // z.buf[8] is xfl, ignored + z.OS = z.buf[9] + } + z.digest.Reset() + z.digest.Write(z.buf[0:10]) + + if z.flg&flagExtra != 0 { + n, err := z.read2() + if err != nil { + return err + } + data := make([]byte, n) + if _, err = io.ReadFull(z.r, data); err != nil { + return err + } + if save { + z.Extra = data + } + } + + var s string + if z.flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + z.Name = s + } + } + + if z.flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + z.Comment = s + } + } + + if z.flg&flagHdrCrc != 0 { + n, err := z.read2() + if err != nil { + return err + } + sum := z.digest.Sum32() & 0xFFFF + if n != sum { + return ErrHeader + } + } + + z.digest.Reset() + z.decompressor = flate.NewReader(z.r) + z.doReadAhead() + return nil +} + +func (z *Reader) killReadAhead() error { + z.mu.Lock() + defer z.mu.Unlock() + if z.activeRA { + if z.closeReader != nil { + close(z.closeReader) + } + + // Wait for decompressor to be closed and return error, if any. + e, ok := <-z.closeErr + z.activeRA = false + if !ok { + // Channel is closed, so if there was any error it has already been returned. + return nil + } + return e + } + return nil +} + +// Starts readahead. +// Will return on error (including io.EOF) +// or when z.closeReader is closed. +func (z *Reader) doReadAhead() { + z.mu.Lock() + defer z.mu.Unlock() + z.activeRA = true + + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + ra := make(chan read, z.blocks) + z.readAhead = ra + closeReader := make(chan struct{}, 0) + z.closeReader = closeReader + z.lastBlock = false + closeErr := make(chan error, 1) + z.closeErr = closeErr + z.size = 0 + z.roff = 0 + z.current = nil + decomp := z.decompressor + + go func() { + defer func() { + closeErr <- decomp.Close() + close(closeErr) + close(ra) + }() + + // We hold a local reference to digest, since + // it way be changed by reset. + digest := z.digest + var wg sync.WaitGroup + for { + var buf []byte + select { + case buf = <-z.blockPool: + case <-closeReader: + return + } + buf = buf[0:z.blockSize] + // Try to fill the buffer + n, err := io.ReadFull(decomp, buf) + if err == io.ErrUnexpectedEOF { + if n > 0 { + err = nil + } else { + // If we got zero bytes, we need to establish if + // we reached end of stream or truncated stream. 
+ _, err = decomp.Read([]byte{}) + if err == io.EOF { + err = nil + } + } + } + if n < len(buf) { + buf = buf[0:n] + } + wg.Wait() + wg.Add(1) + go func() { + digest.Write(buf) + wg.Done() + }() + z.size += uint32(n) + + // If we return any error, out digest must be ready + if err != nil { + wg.Wait() + } + select { + case z.readAhead <- read{b: buf, err: err}: + case <-closeReader: + // Sent on close, we don't care about the next results + return + } + if err != nil { + return + } + } + }() +} + +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + + for { + if len(z.current) == 0 && !z.lastBlock { + read := <-z.readAhead + + if read.err != nil { + // If not nil, the reader will have exited + z.closeReader = nil + + if read.err != io.EOF { + z.err = read.err + return + } + if read.err == io.EOF { + z.lastBlock = true + err = nil + } + } + z.current = read.b + z.roff = 0 + } + avail := z.current[z.roff:] + if len(p) >= len(avail) { + // If len(p) >= len(current), return all content of current + n = copy(p, avail) + z.blockPool <- z.current + z.current = nil + if z.lastBlock { + err = io.EOF + break + } + } else { + // We copy as much as there is space for + n = copy(p, avail) + z.roff += n + } + return + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return 0, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return 0, z.err + } + + // File is ok; should we attempt reading one more? + if !z.multistream { + return 0, io.EOF + } + + // Is there another? + if err = z.readHeader(false); err != nil { + z.err = err + return + } + + // Yes. Reset and read from it. + return z.Read(p) +} + +func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { + total := int64(0) + for { + if z.err != nil { + return total, z.err + } + // We write both to output and digest. + for { + // Read from input + read := <-z.readAhead + if read.err != nil { + // If not nil, the reader will have exited + z.closeReader = nil + + if read.err != io.EOF { + z.err = read.err + return total, z.err + } + if read.err == io.EOF { + z.lastBlock = true + err = nil + } + } + // Write what we got + n, err := w.Write(read.b) + if n != len(read.b) { + return total, io.ErrShortWrite + } + total += int64(n) + if err != nil { + return total, err + } + // Put block back + z.blockPool <- read.b + if z.lastBlock { + break + } + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return total, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return total, z.err + } + // File is ok; should we attempt reading one more? + if !z.multistream { + return total, nil + } + + // Is there another? + err = z.readHeader(false) + if err == io.EOF { + return total, nil + } + if err != nil { + z.err = err + return total, err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. 
+func (z *Reader) Close() error { + return z.killReadAhead() +} diff --git a/vendor/github.com/klauspost/pgzip/gunzip_test.go b/vendor/github.com/klauspost/pgzip/gunzip_test.go new file mode 100644 index 00000000000..8e7d12e9f1b --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/gunzip_test.go @@ -0,0 +1,669 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pgzip + +import ( + "bytes" + oldgz "compress/gzip" + "crypto/rand" + "io" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + kpgzip "github.com/klauspost/compress/gzip" +) + +type gunzipTest struct { + name string + desc string + raw string + gzip []byte + err error +} + +var gunzipTests = []gunzipTest{ + { // has 1 empty fixed-huffman block + "empty.txt", + "empty.txt", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xf7, 0x5e, 0x14, 0x4a, + 0x00, 0x03, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block + "hello.txt", + "hello.txt", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // concatenation + "hello.txt", + "hello.txt x2", + "hello world\n" + + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // has a fixed huffman block with some length-distance pairs + "shesells.txt", + "shesells.txt", + "she sells seashells by the seashore\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x72, 0x66, 0x8b, 0x4a, + 0x00, 0x03, 0x73, 0x68, 0x65, 0x73, 0x65, 0x6c, + 0x6c, 0x73, 0x2e, 0x74, 0x78, 0x74, 0x00, 0x2b, + 0xce, 0x48, 0x55, 0x28, 0x4e, 0xcd, 0xc9, 0x29, + 0x06, 0x92, 0x89, 0xc5, 0x19, 0x60, 0x56, 0x52, + 0xa5, 0x42, 0x09, 0x58, 0x18, 0x28, 0x90, 0x5f, + 0x94, 0xca, 0x05, 0x00, 0x76, 0xb0, 0x3b, 0xeb, + 0x24, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has dynamic huffman blocks + "gettysburg", + "gettysburg", + " Four score and seven years ago our fathers brought forth on\n" + + "this continent, a new nation, conceived in Liberty, and dedicated\n" + + "to the proposition that all men are created equal.\n" + + " Now we are engaged in a great Civil War, testing whether that\n" + + "nation, or any nation so conceived and so dedicated, can long\n" + + "endure.\n" + + " We are met on a great battle-field of that war.\n" + + " We have come to dedicate a portion of that field, as a final\n" + + "resting place for those who here gave their lives that that\n" + + "nation might live. 
It is altogether fitting and proper that\n" + + "we should do this.\n" + + " But, in a larger sense, we can not dedicate — we can not\n" + + "consecrate — we can not hallow — this ground.\n" + + " The brave men, living and dead, who struggled here, have\n" + + "consecrated it, far above our poor power to add or detract.\n" + + "The world will little note, nor long remember what we say here,\n" + + "but it can never forget what they did here.\n" + + " It is for us the living, rather, to be dedicated here to the\n" + + "unfinished work which they who fought here have thus far so\n" + + "nobly advanced. It is rather for us to be here dedicated to\n" + + "the great task remaining before us — that from these honored\n" + + "dead we take increased devotion to that cause for which they\n" + + "gave the last full measure of devotion —\n" + + " that we here highly resolve that these dead shall not have\n" + + "died in vain — that this nation, under God, shall have a new\n" + + "birth of freedom — and that government of the people, by the\n" + + "people, for the people, shall not perish from this earth.\n" + + "\n" + + "Abraham Lincoln, November 19, 1863, Gettysburg, Pennsylvania\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xd1, 0x12, 0x2b, 0x4a, + 0x00, 0x03, 0x67, 0x65, 0x74, 0x74, 0x79, 0x73, + 0x62, 0x75, 0x72, 0x67, 0x00, 0x65, 0x54, 0xcd, + 0x6e, 0xd4, 0x30, 0x10, 0xbe, 0xfb, 0x29, 0xe6, + 0x01, 0x42, 0xa5, 0x0a, 0x09, 0xc1, 0x11, 0x90, + 0x40, 0x48, 0xa8, 0xe2, 0x80, 0xd4, 0xf3, 0x24, + 0x9e, 0x24, 0x56, 0xbd, 0x9e, 0xc5, 0x76, 0x76, + 0x95, 0x1b, 0x0f, 0xc1, 0x13, 0xf2, 0x24, 0x7c, + 0x63, 0x77, 0x9b, 0x4a, 0x5c, 0xaa, 0x6e, 0x6c, + 0xcf, 0x7c, 0x7f, 0x33, 0x44, 0x5f, 0x74, 0xcb, + 0x54, 0x26, 0xcd, 0x42, 0x9c, 0x3c, 0x15, 0xb9, + 0x48, 0xa2, 0x5d, 0x38, 0x17, 0xe2, 0x45, 0xc9, + 0x4e, 0x67, 0xae, 0xab, 0xe0, 0xf7, 0x98, 0x75, + 0x5b, 0xd6, 0x4a, 0xb3, 0xe6, 0xba, 0x92, 0x26, + 0x57, 0xd7, 0x50, 0x68, 0xd2, 0x54, 0x43, 0x92, + 0x54, 0x07, 0x62, 0x4a, 0x72, 0xa5, 0xc4, 0x35, + 0x68, 0x1a, 0xec, 0x60, 0x92, 0x70, 0x11, 0x4f, + 0x21, 0xd1, 0xf7, 0x30, 0x4a, 0xae, 0xfb, 0xd0, + 0x9a, 0x78, 0xf1, 0x61, 0xe2, 0x2a, 0xde, 0x55, + 0x25, 0xd4, 0xa6, 0x73, 0xd6, 0xb3, 0x96, 0x60, + 0xef, 0xf0, 0x9b, 0x2b, 0x71, 0x8c, 0x74, 0x02, + 0x10, 0x06, 0xac, 0x29, 0x8b, 0xdd, 0x25, 0xf9, + 0xb5, 0x71, 0xbc, 0x73, 0x44, 0x0f, 0x7a, 0xa5, + 0xab, 0xb4, 0x33, 0x49, 0x0b, 0x2f, 0xbd, 0x03, + 0xd3, 0x62, 0x17, 0xe9, 0x73, 0xb8, 0x84, 0x48, + 0x8f, 0x9c, 0x07, 0xaa, 0x52, 0x00, 0x6d, 0xa1, + 0xeb, 0x2a, 0xc6, 0xa0, 0x95, 0x76, 0x37, 0x78, + 0x9a, 0x81, 0x65, 0x7f, 0x46, 0x4b, 0x45, 0x5f, + 0xe1, 0x6d, 0x42, 0xe8, 0x01, 0x13, 0x5c, 0x38, + 0x51, 0xd4, 0xb4, 0x38, 0x49, 0x7e, 0xcb, 0x62, + 0x28, 0x1e, 0x3b, 0x82, 0x93, 0x54, 0x48, 0xf1, + 0xd2, 0x7d, 0xe4, 0x5a, 0xa3, 0xbc, 0x99, 0x83, + 0x44, 0x4f, 0x3a, 0x77, 0x36, 0x57, 0xce, 0xcf, + 0x2f, 0x56, 0xbe, 0x80, 0x90, 0x9e, 0x84, 0xea, + 0x51, 0x1f, 0x8f, 0xcf, 0x90, 0xd4, 0x60, 0xdc, + 0x5e, 0xb4, 0xf7, 0x10, 0x0b, 0x26, 0xe0, 0xff, + 0xc4, 0xd1, 0xe5, 0x67, 0x2e, 0xe7, 0xc8, 0x93, + 0x98, 0x05, 0xb8, 0xa8, 0x45, 0xc0, 0x4d, 0x09, + 0xdc, 0x84, 0x16, 0x2b, 0x0d, 0x9a, 0x21, 0x53, + 0x04, 0x8b, 0xd2, 0x0b, 0xbd, 0xa2, 0x4c, 0xa7, + 0x60, 0xee, 0xd9, 0xe1, 0x1d, 0xd1, 0xb7, 0x4a, + 0x30, 0x8f, 0x63, 0xd5, 0xa5, 0x8b, 0x33, 0x87, + 0xda, 0x1a, 0x18, 0x79, 0xf3, 0xe3, 0xa6, 0x17, + 0x94, 0x2e, 0xab, 0x6e, 0xa0, 0xe3, 0xcd, 0xac, + 0x50, 0x8c, 0xca, 0xa7, 0x0d, 0x76, 0x37, 0xd1, + 0x23, 0xe7, 0x05, 0x57, 0x8b, 0xa4, 0x22, 0x83, + 0xd9, 0x62, 0x52, 0x25, 0xad, 0x07, 0xbb, 0xbf, + 
0xbf, 0xff, 0xbc, 0xfa, 0xee, 0x20, 0x73, 0x91, + 0x29, 0xff, 0x7f, 0x02, 0x71, 0x62, 0x84, 0xb5, + 0xf6, 0xb5, 0x25, 0x6b, 0x41, 0xde, 0x92, 0xb7, + 0x76, 0x3f, 0x91, 0x91, 0x31, 0x1b, 0x41, 0x84, + 0x62, 0x30, 0x0a, 0x37, 0xa4, 0x5e, 0x18, 0x3a, + 0x99, 0x08, 0xa5, 0xe6, 0x6d, 0x59, 0x22, 0xec, + 0x33, 0x39, 0x86, 0x26, 0xf5, 0xab, 0x66, 0xc8, + 0x08, 0x20, 0xcf, 0x0c, 0xd7, 0x47, 0x45, 0x21, + 0x0b, 0xf6, 0x59, 0xd5, 0xfe, 0x5c, 0x8d, 0xaa, + 0x12, 0x7b, 0x6f, 0xa1, 0xf0, 0x52, 0x33, 0x4f, + 0xf5, 0xce, 0x59, 0xd3, 0xab, 0x66, 0x10, 0xbf, + 0x06, 0xc4, 0x31, 0x06, 0x73, 0xd6, 0x80, 0xa2, + 0x78, 0xc2, 0x45, 0xcb, 0x03, 0x65, 0x39, 0xc9, + 0x09, 0xd1, 0x06, 0x04, 0x33, 0x1a, 0x5a, 0xf1, + 0xde, 0x01, 0xb8, 0x71, 0x83, 0xc4, 0xb5, 0xb3, + 0xc3, 0x54, 0x65, 0x33, 0x0d, 0x5a, 0xf7, 0x9b, + 0x90, 0x7c, 0x27, 0x1f, 0x3a, 0x58, 0xa3, 0xd8, + 0xfd, 0x30, 0x5f, 0xb7, 0xd2, 0x66, 0xa2, 0x93, + 0x1c, 0x28, 0xb7, 0xe9, 0x1b, 0x0c, 0xe1, 0x28, + 0x47, 0x26, 0xbb, 0xe9, 0x7d, 0x7e, 0xdc, 0x96, + 0x10, 0x92, 0x50, 0x56, 0x7c, 0x06, 0xe2, 0x27, + 0xb4, 0x08, 0xd3, 0xda, 0x7b, 0x98, 0x34, 0x73, + 0x9f, 0xdb, 0xf6, 0x62, 0xed, 0x31, 0x41, 0x13, + 0xd3, 0xa2, 0xa8, 0x4b, 0x3a, 0xc6, 0x1d, 0xe4, + 0x2f, 0x8c, 0xf8, 0xfb, 0x97, 0x64, 0xf4, 0xb6, + 0x2f, 0x80, 0x5a, 0xf3, 0x56, 0xe0, 0x40, 0x50, + 0xd5, 0x19, 0xd0, 0x1e, 0xfc, 0xca, 0xe5, 0xc9, + 0xd4, 0x60, 0x00, 0x81, 0x2e, 0xa3, 0xcc, 0xb6, + 0x52, 0xf0, 0xb4, 0xdb, 0x69, 0x99, 0xce, 0x7a, + 0x32, 0x4c, 0x08, 0xed, 0xaa, 0x10, 0x10, 0xe3, + 0x6f, 0xee, 0x99, 0x68, 0x95, 0x9f, 0x04, 0x71, + 0xb2, 0x49, 0x2f, 0x62, 0xa6, 0x5e, 0xb4, 0xef, + 0x02, 0xed, 0x4f, 0x27, 0xde, 0x4a, 0x0f, 0xfd, + 0xc1, 0xcc, 0xdd, 0x02, 0x8f, 0x08, 0x16, 0x54, + 0xdf, 0xda, 0xca, 0xe0, 0x82, 0xf1, 0xb4, 0x31, + 0x7a, 0xa9, 0x81, 0xfe, 0x90, 0xb7, 0x3e, 0xdb, + 0xd3, 0x35, 0xc0, 0x20, 0x80, 0x33, 0x46, 0x4a, + 0x63, 0xab, 0xd1, 0x0d, 0x29, 0xd2, 0xe2, 0x84, + 0xb8, 0xdb, 0xfa, 0xe9, 0x89, 0x44, 0x86, 0x7c, + 0xe8, 0x0b, 0xe6, 0x02, 0x6a, 0x07, 0x9b, 0x96, + 0xd0, 0xdb, 0x2e, 0x41, 0x4c, 0xa1, 0xd5, 0x57, + 0x45, 0x14, 0xfb, 0xe3, 0xa6, 0x72, 0x5b, 0x87, + 0x6e, 0x0c, 0x6d, 0x5b, 0xce, 0xe0, 0x2f, 0xe2, + 0x21, 0x81, 0x95, 0xb0, 0xe8, 0xb6, 0x32, 0x0b, + 0xb2, 0x98, 0x13, 0x52, 0x5d, 0xfb, 0xec, 0x63, + 0x17, 0x8a, 0x9e, 0x23, 0x22, 0x36, 0xee, 0xcd, + 0xda, 0xdb, 0xcf, 0x3e, 0xf1, 0xc7, 0xf1, 0x01, + 0x12, 0x93, 0x0a, 0xeb, 0x6f, 0xf2, 0x02, 0x15, + 0x96, 0x77, 0x5d, 0xef, 0x9c, 0xfb, 0x88, 0x91, + 0x59, 0xf9, 0x84, 0xdd, 0x9b, 0x26, 0x8d, 0x80, + 0xf9, 0x80, 0x66, 0x2d, 0xac, 0xf7, 0x1f, 0x06, + 0xba, 0x7f, 0xff, 0xee, 0xed, 0x40, 0x5f, 0xa5, + 0xd6, 0xbd, 0x8c, 0x5b, 0x46, 0xd2, 0x7e, 0x48, + 0x4a, 0x65, 0x8f, 0x08, 0x42, 0x60, 0xf7, 0x0f, + 0xb9, 0x16, 0x0b, 0x0c, 0x1a, 0x06, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block then garbage + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, 'g', 'a', 'r', 'b', 'a', 'g', 'e', '!', '!', '!', + }, + ErrHeader, + }, + { // has 1 non-empty fixed huffman block not enough header + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 
0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, gzipID1, + }, + io.ErrUnexpectedEOF, + }, + { // has 1 non-empty fixed huffman block but corrupt checksum + "hello.txt", + "hello.txt + corrupt checksum", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, + { // has 1 non-empty fixed huffman block but corrupt size + "hello.txt", + "hello.txt + corrupt size", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0xff, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, +} + +func TestDecompressor(t *testing.T) { + b := new(bytes.Buffer) + for _, tt := range gunzipTests { + in := bytes.NewReader(tt.gzip) + gzip, err := NewReader(in) + if err != nil { + t.Errorf("%s: NewReader: %s", tt.name, err) + continue + } + defer gzip.Close() + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err := io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s := b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + + // Test Reader Reset. + in = bytes.NewReader(tt.gzip) + err = gzip.Reset(in) + if err != nil { + t.Errorf("%s: Reset: %s", tt.name, err) + continue + } + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err = io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s = b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + } +} + +func TestIssue6550(t *testing.T) { + f, err := os.Open("testdata/issue6550.gz") + if err != nil { + t.Fatal(err) + } + gzip, err := NewReader(f) + if err != nil { + t.Fatalf("NewReader(testdata/issue6550.gz): %v", err) + } + defer gzip.Close() + done := make(chan bool, 1) + go func() { + _, err := io.Copy(ioutil.Discard, gzip) + if err == nil { + t.Errorf("Copy succeeded") + } else { + t.Logf("Copy failed (correctly): %v", err) + } + done <- true + }() + select { + case <-time.After(1 * time.Second): + t.Errorf("Copy hung") + case <-done: + // ok + } +} + +func TestInitialReset(t *testing.T) { + var r Reader + if err := r.Reset(bytes.NewReader(gunzipTests[1].gzip)); err != nil { + t.Error(err) + } + var buf bytes.Buffer + if _, err := io.Copy(&buf, &r); err != nil { + t.Error(err) + } + if s := buf.String(); s != gunzipTests[1].raw { + t.Errorf("got %q want %q", s, gunzipTests[1].raw) + } +} + +func TestMultistreamFalse(t *testing.T) { + // Find concatenation test. + var tt gunzipTest + for _, tt = range gunzipTests { + if strings.HasSuffix(tt.desc, " x2") { + goto Found + } + } + t.Fatal("cannot find hello.txt x2 in gunzip tests") + +Found: + br := bytes.NewReader(tt.gzip) + var r Reader + if err := r.Reset(br); err != nil { + t.Fatalf("first reset: %v", err) + } + + // Expect two streams with "hello world\n", then real EOF. 
+ const hello = "hello world\n" + + r.Multistream(false) + data, err := ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != nil { + t.Fatalf("second reset: %v", err) + } + r.Multistream(false) + data, err = ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != io.EOF { + t.Fatalf("third reset: err=%v, want io.EOF", err) + } +} + +func TestWriteTo(t *testing.T) { + input := make([]byte, 100000) + n, err := rand.Read(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + compressed := &bytes.Buffer{} + // Do it twice to test MultiStream functionality + for i := 0; i < 2; i++ { + w, err := NewWriterLevel(compressed, -2) + if err != nil { + t.Fatal(err) + } + n, err = w.Write(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + w.Close() + } + input = append(input, input...) + buf := compressed.Bytes() + + dec, err := NewReader(bytes.NewBuffer(buf)) + if err != nil { + t.Fatal(err) + } + // ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure. + readall, err := ioutil.ReadAll(ioutil.NopCloser(dec)) + if err != nil { + t.Fatal(err) + } + if len(readall) != len(input) { + t.Fatal("did not decompress everything") + } + if bytes.Compare(readall, input) != 0 { + t.Fatal("output did not match input") + } + + dec, err = NewReader(bytes.NewBuffer(buf)) + if err != nil { + t.Fatal(err) + } + wtbuf := &bytes.Buffer{} + written, err := dec.WriteTo(wtbuf) + if err != nil { + t.Fatal(err) + } + if written != int64(len(input)) { + t.Error("Returned length did not match, expected", len(input), "got", written) + } + if wtbuf.Len() != len(input) { + t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len()) + } + if bytes.Compare(wtbuf.Bytes(), input) != 0 { + t.Fatal("output did not match input") + } +} + +func BenchmarkGunzipCopy(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := NewReader(bytes.NewBuffer(input)) + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err = r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipReadAll(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) 
+ dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := NewReader(bytes.NewBuffer(input)) + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err = r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = ioutil.ReadAll(ioutil.NopCloser(r)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipStdLib(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := oldgz.NewReader(bytes.NewBuffer(input)) + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err = r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipFlate(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := kpgzip.NewReader(bytes.NewBuffer(input)) + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err = r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} + +func TestTruncatedGunzip(t *testing.T) { + in := []byte(strings.Repeat("ASDFASDFASDFASDFASDF", 1000)) + var buf bytes.Buffer + enc := kpgzip.NewWriter(&buf) + _, err := enc.Write(in) + if err != nil { + t.Fatal(err) + } + enc.Close() + testdata := buf.Bytes() + for i := 5; i < len(testdata); i += 10 { + timer := time.NewTimer(time.Second) + done := make(chan struct{}) + fail := make(chan struct{}) + go func() { + r, err := NewReader(bytes.NewBuffer(testdata[:i])) + if err == nil { + b, err := ioutil.ReadAll(r) + if err == nil && !bytes.Equal(testdata[:i], b) { + close(fail) + } + } + close(done) + }() + select { + case <-timer.C: + t.Fatal("Timeout decoding") + case <-fail: + t.Fatal("No error, but mismatch") + case <-done: + timer.Stop() + } + } +} + +func TestTruncatedGunzipBlocks(t *testing.T) { + var in = make([]byte, 512*10) + rand.Read(in) + var buf bytes.Buffer + for i := 0; i < len(in); i += 512 { + enc,_ := kpgzip.NewWriterLevel(&buf, 0) + _, err := enc.Write(in[:i]) + if err != nil { + t.Fatal(err) + } + enc.Close() + + timer := time.NewTimer(time.Second) + done := make(chan struct{}) + fail := make(chan struct{}) + go func() { + r, err := NewReaderN(&buf, 512, 10) + if err == nil { + b, err := ioutil.ReadAll(r) + if err == nil && !bytes.Equal(b, in[:i]) { + close(fail) + } + } + close(done) + }() + select { + case <-timer.C: + t.Fatal("Timeout decoding") + case <-fail: + t.Fatal("No error, but mismatch") + case <-done: + timer.Stop() + } + } +} diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go new file mode 100644 index 00000000000..85d14e9cbca --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/gzip.go @@ -0,0 +1,501 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pgzip + +import ( + "bytes" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "sync" + "time" + + "github.com/klauspost/compress/flate" +) + +const ( + defaultBlockSize = 256 << 10 + tailSize = 16384 + defaultBlocks = 16 +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header + w io.Writer + level int + wroteHeader bool + blockSize int + blocks int + currentBuffer []byte + prevTail []byte + digest hash.Hash32 + size int + closed bool + buf [10]byte + errMu sync.RWMutex + err error + pushedErr chan struct{} + results chan result + dictFlatePool sync.Pool + dstPool sync.Pool + wg sync.WaitGroup +} + +type result struct { + result chan []byte + notifyWritten chan struct{} +} + +// Use SetConcurrency to finetune the concurrency level if needed. +// +// With this you can control the approximate size of your blocks, +// as well as how many you want to be processing in parallel. +// +// Default values for this is SetConcurrency(250000, 16), +// meaning blocks are split at 250000 bytes and up to 16 blocks +// can be processing at once before the writer blocks. +func (z *Writer) SetConcurrency(blockSize, blocks int) error { + if blockSize <= tailSize { + return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize) + } + if blocks <= 0 { + return errors.New("gzip: blocks cannot be zero or less") + } + if blockSize == z.blockSize && blocks == z.blocks { + return nil + } + z.blockSize = blockSize + z.results = make(chan result, blocks) + z.blocks = blocks + z.dstPool = sync.Pool{New: func() interface{} { return make([]byte, 0, blockSize+(blockSize)>>4) }} + return nil +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write or Close. The Comment and Name header fields are +// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO +// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an +// error on Write. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. 
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < ConstantCompression || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.SetConcurrency(defaultBlockSize, defaultBlocks) + z.init(w, level) + return z, nil +} + +// This function must be used by goroutines to set an +// error condition, since z.err access is restricted +// to the callers goruotine. +func (z *Writer) pushError(err error) { + z.errMu.Lock() + if z.err != nil { + z.errMu.Unlock() + return + } + z.err = err + close(z.pushedErr) + z.errMu.Unlock() +} + +func (z *Writer) init(w io.Writer, level int) { + z.wg.Wait() + digest := z.digest + if digest != nil { + digest.Reset() + } else { + digest = crc32.NewIEEE() + } + z.Header = Header{OS: 255} + z.w = w + z.level = level + z.digest = digest + z.pushedErr = make(chan struct{}, 0) + z.results = make(chan result, z.blocks) + z.err = nil + z.closed = false + z.Comment = "" + z.Extra = nil + z.ModTime = time.Time{} + z.wroteHeader = false + z.currentBuffer = nil + z.buf = [10]byte{} + z.prevTail = nil + z.size = 0 + if z.dictFlatePool.New == nil { + z.dictFlatePool.New = func() interface{} { + f, _ := flate.NewWriterDict(w, level, nil) + return f + } + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + if z.results != nil && !z.closed { + close(z.results) + } + z.SetConcurrency(defaultBlockSize, defaultBlocks) + z.init(w, z.level) +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). +func put2(p []byte, v uint16) { + p[0] = uint8(v >> 0) + p[1] = uint8(v >> 8) +} + +func put4(p []byte, v uint32) { + p[0] = uint8(v >> 0) + p[1] = uint8(v >> 8) + p[2] = uint8(v >> 16) + p[3] = uint8(v >> 24) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + put2(z.buf[0:2], uint16(len(b))) + _, err := z.w.Write(z.buf[0:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. + needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[0:1]) + return err +} + +// compressCurrent will compress the data currently buffered +// This should only be called from the main writer/flush/closer +func (z *Writer) compressCurrent(flush bool) { + r := result{} + r.result = make(chan []byte, 1) + r.notifyWritten = make(chan struct{}, 0) + select { + case z.results <- r: + case <-z.pushedErr: + return + } + + // If block given is more than twice the block size, split it. 
+ c := z.currentBuffer + if len(c) > z.blockSize*2 { + c = c[:z.blockSize] + z.wg.Add(1) + go z.compressBlock(c, z.prevTail, r, false) + z.prevTail = c[len(c)-tailSize:] + z.currentBuffer = z.currentBuffer[z.blockSize:] + z.compressCurrent(flush) + // Last one flushes if needed + return + } + + z.wg.Add(1) + go z.compressBlock(c, z.prevTail, r, z.closed) + if len(c) > tailSize { + z.prevTail = c[len(c)-tailSize:] + } else { + z.prevTail = nil + } + z.currentBuffer = z.dstPool.Get().([]byte) + z.currentBuffer = z.currentBuffer[:0] + + // Wait if flushing + if flush { + <-r.notifyWritten + } +} + +// Returns an error if it has been set. +// Cannot be used by functions that are from internal goroutines. +func (z *Writer) checkError() error { + z.errMu.RLock() + err := z.err + z.errMu.RUnlock() + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed to output until +// the Writer is closed or Flush() is called. +// +// The function will return quickly, if there are unused buffers. +// The sent slice (p) is copied, and the caller is free to re-use the buffer +// when the function returns. +// +// Errors that occur during compression will be reported later, and a nil error +// does not signify that the compression succeeded (since it is most likely still running) +// That means that the call that returns an error may not be the call that caused it. +// Only Flush and Close functions are guaranteed to return any errors up to that point. +func (z *Writer) Write(p []byte) (int, error) { + if err := z.checkError(); err != nil { + return 0, err + } + // Write the GZIP header lazily. + if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + put4(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + var n int + var err error + n, err = z.w.Write(z.buf[0:10]) + if err != nil { + z.pushError(err) + return n, err + } + if z.Extra != nil { + err = z.writeBytes(z.Extra) + if err != nil { + z.pushError(err) + return n, err + } + } + if z.Name != "" { + err = z.writeString(z.Name) + if err != nil { + z.pushError(err) + return n, err + } + } + if z.Comment != "" { + err = z.writeString(z.Comment) + if err != nil { + z.pushError(err) + return n, err + } + } + // Start receiving data from compressors + go func() { + listen := z.results + for { + r, ok := <-listen + // If closed, we are finished. + if !ok { + return + } + buf := <-r.result + n, err := z.w.Write(buf) + if err != nil { + z.pushError(err) + close(r.notifyWritten) + return + } + if n != len(buf) { + z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf))) + close(r.notifyWritten) + return + } + z.dstPool.Put(buf) + close(r.notifyWritten) + } + }() + z.currentBuffer = make([]byte, 0, z.blockSize) + } + q := p + for len(q) > 0 { + length := len(q) + if length+len(z.currentBuffer) > z.blockSize { + length = z.blockSize - len(z.currentBuffer) + } + z.digest.Write(q[:length]) + z.currentBuffer = append(z.currentBuffer, q[:length]...) 
+ if len(z.currentBuffer) >= z.blockSize { + z.compressCurrent(false) + if err := z.checkError(); err != nil { + return len(p) - len(q) - length, err + } + } + z.size += length + q = q[length:] + } + return len(p), z.checkError() +} + +// Step 1: compresses buffer to buffer +// Step 2: send writer to channel +// Step 3: Close result channel to indicate we are done +func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) { + defer func() { + close(r.result) + z.wg.Done() + }() + buf := z.dstPool.Get().([]byte) + dest := bytes.NewBuffer(buf[:0]) + + compressor := z.dictFlatePool.Get().(*flate.Writer) + compressor.ResetDict(dest, prevTail) + compressor.Write(p) + + err := compressor.Flush() + if err != nil { + z.pushError(err) + return + } + if closed { + err = compressor.Close() + if err != nil { + z.pushError(err) + return + } + } + z.dictFlatePool.Put(compressor) + // Read back buffer + buf = dest.Bytes() + r.result <- buf +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if err := z.checkError(); err != nil { + return err + } + if z.closed { + return nil + } + if !z.wroteHeader { + _, err := z.Write(nil) + if err != nil { + return err + } + } + // We send current block to compression + z.compressCurrent(true) + + return z.checkError() +} + +// UncompressedSize will return the number of bytes written. +// pgzip only, not a function in the official gzip package. +func (z *Writer) UncompressedSize() int { + return z.size +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if err := z.checkError(); err != nil { + return err + } + if z.closed { + return nil + } + + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if err := z.checkError(); err != nil { + return err + } + } + z.compressCurrent(true) + if err := z.checkError(); err != nil { + return err + } + close(z.results) + put4(z.buf[0:4], z.digest.Sum32()) + put4(z.buf[4:8], uint32(z.size)) + _, err := z.w.Write(z.buf[0:8]) + if err != nil { + z.pushError(err) + return err + } + return nil +} diff --git a/vendor/github.com/klauspost/pgzip/gzip_test.go b/vendor/github.com/klauspost/pgzip/gzip_test.go new file mode 100644 index 00000000000..902468bced1 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/gzip_test.go @@ -0,0 +1,614 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pgzip + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "sync" + "testing" + "time" +) + +// TestEmpty tests that an empty payload still forms a valid GZIP stream. 
+func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + + if err := NewWriter(buf).Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if len(b) != 0 { + t.Fatalf("got %d bytes, want 0", len(b)) + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestRoundTrip tests that gzipping and then gunzipping is the identity +// function. +func TestRoundTrip(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + if _, err := w.Write([]byte("payload")); err != nil { + t.Fatalf("Write: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if string(b) != "payload" { + t.Fatalf("payload is %q, want %q", string(b), "payload") + } + if r.Comment != "comment" { + t.Fatalf("comment is %q, want %q", r.Comment, "comment") + } + if string(r.Extra) != "extra" { + t.Fatalf("extra is %q, want %q", r.Extra, "extra") + } + if r.ModTime.Unix() != 1e8 { + t.Fatalf("mtime is %d, want %d", r.ModTime.Unix(), uint32(1e8)) + } + if r.Name != "name" { + t.Fatalf("name is %q, want %q", r.Name, "name") + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestLatin1 tests the internal functions for converting to and from Latin-1. +func TestLatin1(t *testing.T) { + latin1 := []byte{0xc4, 'u', 0xdf, 'e', 'r', 'u', 'n', 'g', 0} + utf8 := "Äußerung" + z := Reader{r: bufio.NewReader(bytes.NewReader(latin1))} + s, err := z.readString() + if err != nil { + t.Fatalf("readString: %v", err) + } + if s != utf8 { + t.Fatalf("read latin-1: got %q, want %q", s, utf8) + } + + buf := bytes.NewBuffer(make([]byte, 0, len(latin1))) + c := Writer{w: buf} + if err = c.writeString(utf8); err != nil { + t.Fatalf("writeString: %v", err) + } + s = buf.String() + if s != string(latin1) { + t.Fatalf("write utf-8: got %q, want %q", s, string(latin1)) + } +} + +// TestLatin1RoundTrip tests that metadata that is representable in Latin-1 +// survives a round trip. 
+func TestLatin1RoundTrip(t *testing.T) { + testCases := []struct { + name string + ok bool + }{ + {"", true}, + {"ASCII is OK", true}, + {"unless it contains a NUL\x00", false}, + {"no matter where \x00 occurs", false}, + {"\x00\x00\x00", false}, + {"Látin-1 also passes (U+00E1)", true}, + {"but LĀtin Extended-A (U+0100) does not", false}, + {"neither does 日本語", false}, + {"invalid UTF-8 also \xffails", false}, + {"\x00 as does Látin-1 with NUL", false}, + } + for _, tc := range testCases { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Name = tc.name + err := w.Close() + if (err == nil) != tc.ok { + t.Errorf("Writer.Close: name = %q, err = %v", tc.name, err) + continue + } + if !tc.ok { + continue + } + + r, err := NewReader(buf) + if err != nil { + t.Errorf("NewReader: %v", err) + continue + } + _, err = ioutil.ReadAll(r) + if err != nil { + t.Errorf("ReadAll: %v", err) + continue + } + if r.Name != tc.name { + t.Errorf("name is %q, want %q", r.Name, tc.name) + continue + } + if err := r.Close(); err != nil { + t.Errorf("Reader.Close: %v", err) + continue + } + } +} + +func TestWriterFlush(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + + n0 := buf.Len() + if n0 != 0 { + t.Fatalf("buffer size = %d before writes; want 0", n0) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n1 := buf.Len() + if n1 == 0 { + t.Fatal("no data after first flush") + } + + w.Write([]byte("x")) + + n2 := buf.Len() + if n1 != n2 { + t.Fatalf("after writing a single byte, size changed from %d to %d; want no change", n1, n2) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n3 := buf.Len() + if n2 == n3 { + t.Fatal("Flush didn't flush any data") + } +} + +// Multiple gzip files concatenated form a valid gzip file. 
+func TestConcat(t *testing.T) { + var buf bytes.Buffer + w := NewWriter(&buf) + w.Write([]byte("hello ")) + w.Close() + w = NewWriter(&buf) + w.Write([]byte("world\n")) + w.Close() + + r, err := NewReader(&buf) + data, err := ioutil.ReadAll(r) + if string(data) != "hello world\n" || err != nil { + t.Fatalf("ReadAll = %q, %v, want %q, nil", data, err, "hello world") + } +} + +func TestWriterReset(t *testing.T) { + buf := new(bytes.Buffer) + buf2 := new(bytes.Buffer) + z := NewWriter(buf) + msg := []byte("hello world") + z.Write(msg) + z.Close() + z.Reset(buf2) + z.Write(msg) + z.Close() + if buf.String() != buf2.String() { + t.Errorf("buf2 %q != original buf of %q", buf2.String(), buf.String()) + } +} + +var testbuf []byte + +func testFile(i int, t *testing.T) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dl := len(dat) + if len(testbuf) != i*dl { + // Make results predictable + testbuf = make([]byte, i*dl) + for j := 0; j < i; j++ { + copy(testbuf[j*dl:j*dl+dl], dat) + } + } + + br := bytes.NewBuffer(testbuf) + var buf bytes.Buffer + w, _ := NewWriterLevel(&buf, 6) + io.Copy(w, br) + w.Close() + r, err := NewReader(&buf) + if err != nil { + t.Fatal(err.Error()) + } + decoded, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err.Error()) + } + if !bytes.Equal(testbuf, decoded) { + t.Errorf("decoded content does not match.") + } +} + +func TestFile1(t *testing.T) { testFile(1, t) } +func TestFile10(t *testing.T) { testFile(10, t) } + +func TestFile50(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testFile(50, t) +} + +func TestFile200(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testFile(200, t) +} + +func testBigGzip(i int, t *testing.T) { + if len(testbuf) != i { + // Make results predictable + rand.Seed(1337) + testbuf = make([]byte, i) + for idx := range testbuf { + testbuf[idx] = byte(65 + rand.Intn(32)) + } + } + + br := bytes.NewBuffer(testbuf) + var buf bytes.Buffer + w, _ := NewWriterLevel(&buf, 6) + io.Copy(w, br) + // Test UncompressedSize() + if len(testbuf) != w.UncompressedSize() { + t.Errorf("uncompressed size does not match. buffer:%d, UncompressedSize():%d", len(testbuf), w.UncompressedSize()) + } + err := w.Close() + if err != nil { + t.Fatal(err.Error()) + } + // Close should not affect the number + if len(testbuf) != w.UncompressedSize() { + t.Errorf("uncompressed size does not match. buffer:%d, UncompressedSize():%d", len(testbuf), w.UncompressedSize()) + } + + r, err := NewReader(&buf) + if err != nil { + t.Fatal(err.Error()) + } + decoded, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err.Error()) + } + if !bytes.Equal(testbuf, decoded) { + t.Errorf("decoded content does not match.") + } +} + +func TestGzip1K(t *testing.T) { testBigGzip(1000, t) } +func TestGzip100K(t *testing.T) { testBigGzip(100000, t) } +func TestGzip1M(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + + testBigGzip(1000000, t) +} +func TestGzip10M(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testBigGzip(10000000, t) +} + +// Test if two runs produce identical results. 
+func TestDeterministicLM2(t *testing.T) { testDeterm(-2, t) } +func TestDeterministicL0(t *testing.T) { testDeterm(0, t) } +func TestDeterministicL1(t *testing.T) { testDeterm(1, t) } +func TestDeterministicL2(t *testing.T) { testDeterm(2, t) } +func TestDeterministicL3(t *testing.T) { testDeterm(3, t) } +func TestDeterministicL4(t *testing.T) { testDeterm(4, t) } +func TestDeterministicL5(t *testing.T) { testDeterm(5, t) } +func TestDeterministicL6(t *testing.T) { testDeterm(6, t) } +func TestDeterministicL7(t *testing.T) { testDeterm(7, t) } +func TestDeterministicL8(t *testing.T) { testDeterm(8, t) } +func TestDeterministicL9(t *testing.T) { testDeterm(9, t) } + +func testDeterm(i int, t *testing.T) { + var length = defaultBlockSize*defaultBlocks + 500 + if testing.Short() { + length = defaultBlockSize*2 + 500 + } + rand.Seed(1337) + t1 := make([]byte, length) + for idx := range t1 { + t1[idx] = byte(65 + rand.Intn(8)) + } + + br := bytes.NewBuffer(t1) + var b1 bytes.Buffer + w, err := NewWriterLevel(&b1, i) + if err != nil { + t.Fatal(err) + } + // Use a very small prime sized buffer. + cbuf := make([]byte, 787) + _, err = copyBuffer(w, br, cbuf) + if err != nil { + t.Fatal(err) + } + w.Flush() + w.Close() + + rand.Seed(1337) + t2 := make([]byte, length) + for idx := range t2 { + t2[idx] = byte(65 + rand.Intn(8)) + } + + br2 := bytes.NewBuffer(t2) + var b2 bytes.Buffer + w2, err := NewWriterLevel(&b2, i) + if err != nil { + t.Fatal(err) + } + // We choose a different buffer size, + // bigger than a maximum block, and also a prime. + cbuf = make([]byte, 81761) + _, err = copyBuffer(w2, br2, cbuf) + if err != nil { + t.Fatal(err) + } + w2.Flush() + w2.Close() + + b1b := b1.Bytes() + b2b := b2.Bytes() + + if bytes.Compare(b1b, b2b) != 0 { + t.Fatalf("Level %d did not produce deterministric result, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b)) + } +} + +func BenchmarkGzipL1(b *testing.B) { benchmarkGzipN(b, 1) } +func BenchmarkGzipL2(b *testing.B) { benchmarkGzipN(b, 2) } +func BenchmarkGzipL3(b *testing.B) { benchmarkGzipN(b, 3) } +func BenchmarkGzipL4(b *testing.B) { benchmarkGzipN(b, 4) } +func BenchmarkGzipL5(b *testing.B) { benchmarkGzipN(b, 5) } +func BenchmarkGzipL6(b *testing.B) { benchmarkGzipN(b, 6) } +func BenchmarkGzipL7(b *testing.B) { benchmarkGzipN(b, 7) } +func BenchmarkGzipL8(b *testing.B) { benchmarkGzipN(b, 8) } +func BenchmarkGzipL9(b *testing.B) { benchmarkGzipN(b, 9) } + +func benchmarkGzipN(b *testing.B, level int) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + w, _ := NewWriterLevel(ioutil.Discard, level) + w.Write(dat) + w.Flush() + w.Close() + } +} + +type errorWriter struct { + mu sync.RWMutex + returnError bool +} + +func (e *errorWriter) ErrorNow() { + e.mu.Lock() + e.returnError = true + e.mu.Unlock() +} + +func (e *errorWriter) Reset() { + e.mu.Lock() + e.returnError = false + e.mu.Unlock() +} + +func (e *errorWriter) Write(b []byte) (int, error) { + e.mu.RLock() + defer e.mu.RUnlock() + if e.returnError { + return 0, fmt.Errorf("Intentional Error") + } + return len(b), nil +} + +// TestErrors tests that errors are returned and that +// error state is maintained and reset by Reset. 
+func TestErrors(t *testing.T) { + ew := &errorWriter{} + w := NewWriter(ew) + dat, _ := ioutil.ReadFile("testdata/test.json") + n := 0 + ew.ErrorNow() + for { + _, err := w.Write(dat) + if err != nil { + break + } + if n > 1000 { + t.Fatal("did not get error before 1000 iterations") + } + n++ + } + if err := w.Close(); err == nil { + t.Fatal("Writer.Close: Should have returned error") + } + ew.Reset() + w.Reset(ew) + _, err := w.Write(dat) + if err != nil { + t.Fatal("Writer after Reset, unexpected error:", err) + } + ew.ErrorNow() + if err = w.Flush(); err == nil { + t.Fatal("Writer.Flush: Should have returned error") + } + if err = w.Close(); err == nil { + t.Fatal("Writer.Close: Should have returned error") + } + // Test Sync only + w.Reset(ew) + if err = w.Flush(); err == nil { + t.Fatal("Writer.Flush: Should have returned error") + } + if err = w.Close(); err == nil { + t.Fatal("Writer.Close: Should have returned error") + } + // Test Close only + w.Reset(ew) + if err = w.Close(); err == nil { + t.Fatal("Writer.Close: Should have returned error") + } + +} + +// A writer that fails after N writes. +type errorWriter2 struct { + N int +} + +func (e *errorWriter2) Write(b []byte) (int, error) { + if e.N <= 0 { + return 0, io.ErrClosedPipe + } + e.N-- + return len(b), nil +} + +// Test if errors from the underlying writer is passed upwards. +func TestWriteError(t *testing.T) { + buf := new(bytes.Buffer) + n := 65536 + if !testing.Short() { + n *= 4 + } + for i := 0; i < n; i++ { + fmt.Fprintf(buf, "asdasfasf%d%dfghfgujyut%dyutyu\n", i, i, i) + } + in := buf.Bytes() + // We create our own buffer to control number of writes. + copyBuf := make([]byte, 128) + for l := 0; l < 10; l++ { + for fail := 1; fail <= 16; fail *= 2 { + // Fail after 'fail' writes + ew := &errorWriter2{N: fail} + w, err := NewWriterLevel(ew, l) + if err != nil { + t.Fatalf("NewWriter: level %d: %v", l, err) + } + n, err := copyBuffer(w, bytes.NewBuffer(in), copyBuf) + if err == nil { + t.Fatalf("Level %d: Expected an error, writer was %#v", l, ew) + } + n2, err := w.Write([]byte{1, 2, 2, 3, 4, 5}) + if n2 != 0 { + t.Fatal("Level", l, "Expected 0 length write, got", n) + } + if err == nil { + t.Fatal("Level", l, "Expected an error") + } + err = w.Flush() + if err == nil { + t.Fatal("Level", l, "Expected an error on flush") + } + err = w.Close() + if err == nil { + t.Fatal("Level", l, "Expected an error on close") + } + + w.Reset(ioutil.Discard) + n2, err = w.Write([]byte{1, 2, 3, 4, 5, 6}) + if err != nil { + t.Fatal("Level", l, "Got unexpected error after reset:", err) + } + if n2 == 0 { + t.Fatal("Level", l, "Got 0 length write, expected > 0") + } + if testing.Short() { + return + } + } + } +} + +// copyBuffer is a copy of io.CopyBuffer, since we want to support older go versions. +// This is modified to never use io.WriterTo or io.ReaderFrom interfaces. 
+func copyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + if buf == nil { + buf = make([]byte, 32*1024) + } + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} diff --git a/vendor/github.com/klauspost/pgzip/testdata/issue6550.gz b/vendor/github.com/klauspost/pgzip/testdata/issue6550.gz new file mode 100644 index 00000000000..57972b63668 Binary files /dev/null and b/vendor/github.com/klauspost/pgzip/testdata/issue6550.gz differ diff --git a/vendor/github.com/klauspost/pgzip/testdata/test.json b/vendor/github.com/klauspost/pgzip/testdata/test.json new file mode 100644 index 00000000000..3b7b678b5e4 --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/testdata/test.json @@ -0,0 +1,5902 @@ +[ + { + "_id": "543fa821aeca0fed7f182f01", + "index": 0, + "guid": "3526d142-6d2b-4266-9855-e6ec1589a265", + "isActive": false, + "balance": "$2,156.72", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "brown", + "name": { + "first": "Rosella", + "last": "Hale" + }, + "company": "SKINSERVE", + "email": "rosella.hale@skinserve.net", + "phone": "+1 (920) 528-2959", + "address": "324 Imlay Street, Sehili, Guam, 3022", + "about": "Est consectetur ut incididunt commodo elit cillum incididunt consectetur id officia pariatur pariatur cillum. Ipsum non incididunt tempor non. Cillum aliquip aliquip non minim ipsum voluptate incididunt adipisicing aute pariatur laborum minim deserunt laborum. Do do consequat enim adipisicing dolor incididunt reprehenderit sint. Veniam dolor consequat sint ullamco id enim occaecat.\r\n", + "registered": "Wednesday, August 27, 2014 9:12 PM", + "latitude": 43.44586, + "longitude": -65.480986, + "tags": [ + "Lorem", + "ex", + "magna", + "aliqua", + "id", + "sint", + "elit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Etta Stanton" + }, + { + "id": 1, + "name": "Cora Velazquez" + }, + { + "id": 2, + "name": "Deann Guy" + } + ], + "greeting": "Hello, Rosella! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8218066e8499ef38bcc", + "index": 1, + "guid": "991a35b5-91db-49e8-8a1e-13688b5ed88d", + "isActive": true, + "balance": "$1,762.71", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "green", + "name": { + "first": "Rose", + "last": "Lynn" + }, + "company": "NITRACYR", + "email": "rose.lynn@nitracyr.com", + "phone": "+1 (912) 564-2131", + "address": "485 Pulaski Street, Logan, Mississippi, 7453", + "about": "Minim proident enim eiusmod reprehenderit excepteur laboris. Adipisicing culpa cupidatat eiusmod exercitation reprehenderit anim. Nostrud mollit reprehenderit reprehenderit id magna et id esse cillum et proident. Incididunt eu nisi excepteur est est irure voluptate id nulla. Laboris consectetur aliqua cupidatat ex elit proident officia ex quis. Minim officia eu eiusmod velit. 
Ullamco dolor non quis aliqua cupidatat amet laborum laborum ad ex proident qui eiusmod ea.\r\n", + "registered": "Sunday, October 5, 2014 10:36 PM", + "latitude": -3.548698, + "longitude": 79.421107, + "tags": [ + "exercitation", + "adipisicing", + "aliqua", + "do", + "id", + "veniam", + "est" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Ada Little" + }, + { + "id": 1, + "name": "Lopez Osborne" + }, + { + "id": 2, + "name": "Tami Leach" + } + ], + "greeting": "Hello, Rose! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821255974bb9f89e5ea", + "index": 2, + "guid": "e5727238-63a4-4e1e-88cc-67300826259c", + "isActive": false, + "balance": "$2,131.97", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "green", + "name": { + "first": "Gloria", + "last": "Richards" + }, + "company": "SPHERIX", + "email": "gloria.richards@spherix.biz", + "phone": "+1 (884) 536-3434", + "address": "493 Judge Street, Cetronia, Rhode Island, 4439", + "about": "Lorem cupidatat ea et laboris tempor enim non. Sit consequat culpa et qui aute cillum ut ullamco. Nulla duis sit Lorem incididunt mollit nostrud dolor veniam ullamco. Sunt magna id velit in laborum nisi labore. Id deserunt labore dolore dolor aliqua culpa est id duis.\r\n", + "registered": "Saturday, March 29, 2014 8:18 AM", + "latitude": 60.328012, + "longitude": 126.657357, + "tags": [ + "dolore", + "laboris", + "proident", + "cillum", + "in", + "fugiat", + "incididunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bowen Cote" + }, + { + "id": 1, + "name": "Olga Gardner" + }, + { + "id": 2, + "name": "Evangeline Howard" + } + ], + "greeting": "Hello, Gloria! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8212b7e1e8201a38702", + "index": 3, + "guid": "bab757bd-2ebd-4c2c-86b7-0d4d8b059d35", + "isActive": true, + "balance": "$2,509.81", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Casey", + "last": "Hayes" + }, + "company": "SURELOGIC", + "email": "casey.hayes@surelogic.co.uk", + "phone": "+1 (993) 573-3937", + "address": "330 Tapscott Avenue, Eastvale, New Mexico, 928", + "about": "Eu elit sint sunt labore dolor cillum esse ad voluptate commodo. Dolor aliqua do dolore ex tempor sint consequat culpa et consectetur nisi voluptate reprehenderit. Dolor velit eu cillum tempor anim anim. Nostrud laboris eiusmod elit enim duis in consectetur esse anim qui. Et eiusmod culpa nulla anim et officia pariatur reprehenderit eiusmod veniam. Ullamco nisi ea incididunt velit. Ullamco cillum mollit ea aliqua ea eu et enim.\r\n", + "registered": "Sunday, September 14, 2014 8:35 AM", + "latitude": -43.494604, + "longitude": 95.217518, + "tags": [ + "officia", + "sunt", + "dolore", + "qui", + "elit", + "irure", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Serrano Wise" + }, + { + "id": 1, + "name": "Lorene Macias" + }, + { + "id": 2, + "name": "Kristen Lott" + } + ], + "greeting": "Hello, Casey! 
You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821bfefa43403d5d054", + "index": 4, + "guid": "675d1598-8c45-4d67-a4df-d38a270de371", + "isActive": false, + "balance": "$3,887.07", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Price", + "last": "Oconnor" + }, + "company": "ANOCHA", + "email": "price.oconnor@anocha.tv", + "phone": "+1 (855) 410-3197", + "address": "447 Stockholm Street, Templeton, Wisconsin, 2216", + "about": "Cillum veniam esse duis tempor incididunt do dolor officia elit eu. Excepteur velit reprehenderit minim Lorem commodo est. Duis Lorem nisi elit aliquip est deserunt fugiat ut. Nisi tempor ex est pariatur laborum eiusmod anim eu nulla. Nisi enim id aute id ex id nostrud.\r\n", + "registered": "Wednesday, May 14, 2014 5:19 PM", + "latitude": 26.083477, + "longitude": 122.61114, + "tags": [ + "in", + "ad", + "aliqua", + "minim", + "nisi", + "cupidatat", + "id" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Montgomery Mccray" + }, + { + "id": 1, + "name": "Lucia Ferrell" + }, + { + "id": 2, + "name": "Glover Brock" + } + ], + "greeting": "Hello, Price! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a699260d8ed4439a", + "index": 5, + "guid": "5e271270-fef3-48a7-b389-346251b46abc", + "isActive": false, + "balance": "$1,046.50", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Rita", + "last": "Huber" + }, + "company": "VURBO", + "email": "rita.huber@vurbo.name", + "phone": "+1 (803) 589-3948", + "address": "838 River Street, Gadsden, American Samoa, 2602", + "about": "Culpa quis qui exercitation velit officia eu id qui consequat qui. Ea fugiat quis fugiat proident velit. Velit et reprehenderit quis irure adipisicing duis dolor id cupidatat ea aliqua elit.\r\n", + "registered": "Friday, April 11, 2014 11:56 AM", + "latitude": 30.717665, + "longitude": -29.687902, + "tags": [ + "veniam", + "ex", + "deserunt", + "cillum", + "sint", + "eu", + "proident" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Oliver Terrell" + }, + { + "id": 1, + "name": "Lora Shepherd" + }, + { + "id": 2, + "name": "Guzman Holman" + } + ], + "greeting": "Hello, Rita! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821b888230e87ce950e", + "index": 6, + "guid": "7e4efd9a-4923-42ae-8924-d6d5fae80ec0", + "isActive": false, + "balance": "$1,205.59", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Peterson", + "last": "Oliver" + }, + "company": "ZENTIX", + "email": "peterson.oliver@zentix.me", + "phone": "+1 (924) 564-2815", + "address": "596 Middleton Street, Walker, Louisiana, 3358", + "about": "Exercitation cillum sit exercitation voluptate duis nostrud incididunt cillum sint minim labore tempor minim ad. Esse ad id pariatur cillum id exercitation ullamco elit. Quis nisi excepteur mollit consectetur id et. Ea voluptate nulla duis minim exercitation aliqua aute nisi enim enim excepteur dolor ad non. Aliquip elit eu enim officia minim enim Lorem tempor. 
Cillum anim aute sunt cupidatat deserunt consequat.\r\n", + "registered": "Friday, March 28, 2014 2:28 PM", + "latitude": 45.092029, + "longitude": 56.730029, + "tags": [ + "in", + "voluptate", + "sit", + "sit", + "Lorem", + "reprehenderit", + "esse" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Wiley Henry" + }, + { + "id": 1, + "name": "Downs Rowland" + }, + { + "id": 2, + "name": "White Guerra" + } + ], + "greeting": "Hello, Peterson! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210f8589ab846b3d88", + "index": 7, + "guid": "afc25e30-7e70-4a87-bdd3-519e1837969a", + "isActive": false, + "balance": "$3,928.85", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Shauna", + "last": "Morse" + }, + "company": "CENTICE", + "email": "shauna.morse@centice.us", + "phone": "+1 (926) 517-3679", + "address": "752 Dunne Place, Ebro, Kansas, 3215", + "about": "Cupidatat incididunt sit duis tempor labore dolore aute qui magna in. Consequat aute ut veniam laborum aliqua Lorem esse. Cillum in qui sint excepteur eiusmod eiusmod eu anim adipisicing et.\r\n", + "registered": "Saturday, September 6, 2014 2:32 PM", + "latitude": 36.341849, + "longitude": 108.378341, + "tags": [ + "in", + "nulla", + "labore", + "qui", + "id", + "enim", + "fugiat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lizzie Carson" + }, + { + "id": 1, + "name": "Eliza Hall" + }, + { + "id": 2, + "name": "Baxter Burton" + } + ], + "greeting": "Hello, Shauna! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8213c997bd81d4a7fa5", + "index": 8, + "guid": "1337ad27-17e3-459f-90a3-a43b54b88184", + "isActive": true, + "balance": "$2,096.67", + "picture": "http://placehold.it/32x32", + "age": 24, + "eyeColor": "blue", + "name": { + "first": "Glenn", + "last": "Brooks" + }, + "company": "MANGLO", + "email": "glenn.brooks@manglo.ca", + "phone": "+1 (895) 595-2669", + "address": "605 McDonald Avenue, Nicholson, Indiana, 2302", + "about": "Deserunt incididunt ullamco dolore nostrud cupidatat sit consequat adipisicing incididunt sunt. Laboris fugiat et laboris est eu laborum culpa. Labore ad aliquip ut enim aute nulla quis cillum dolor aliqua. Culpa labore occaecat et sunt qui. Velit consequat ad proident non voluptate non mollit eu et cillum tempor. Velit quis deserunt Lorem cupidatat enim ut.\r\n", + "registered": "Friday, March 21, 2014 9:06 AM", + "latitude": -40.51084, + "longitude": -137.771438, + "tags": [ + "enim", + "laboris", + "culpa", + "do", + "nulla", + "anim", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Shelly Cardenas" + }, + { + "id": 1, + "name": "Kristine Mendoza" + }, + { + "id": 2, + "name": "Hall Hendrix" + } + ], + "greeting": "Hello, Glenn! 
You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821054bcf388259b272", + "index": 9, + "guid": "cd9601fc-7ca7-4d54-830b-145ff5c5c147", + "isActive": true, + "balance": "$1,816.14", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Maribel", + "last": "Small" + }, + "company": "FUTURITY", + "email": "maribel.small@futurity.org", + "phone": "+1 (825) 532-2134", + "address": "424 Rockaway Parkway, Vale, Alaska, 1834", + "about": "Aliqua irure culpa exercitation nostrud qui exercitation deserunt ullamco culpa aliquip irure. Proident officia in consequat laborum ex adipisicing exercitation proident anim cupidatat excepteur anim. Labore irure pariatur laboris reprehenderit.\r\n", + "registered": "Tuesday, July 29, 2014 9:59 PM", + "latitude": 53.843872, + "longitude": 85.292318, + "tags": [ + "dolore", + "laborum", + "aute", + "aliqua", + "nostrud", + "commodo", + "commodo" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Anthony Bray" + }, + { + "id": 1, + "name": "Vicki Kelly" + }, + { + "id": 2, + "name": "Baird Wagner" + } + ], + "greeting": "Hello, Maribel! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821f14efb6f7f1210b9", + "index": 10, + "guid": "c9400d51-ea8d-4748-9a10-fa0e3a037b23", + "isActive": false, + "balance": "$1,395.15", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Kendra", + "last": "Knapp" + }, + "company": "UNCORP", + "email": "kendra.knapp@uncorp.biz", + "phone": "+1 (830) 509-3054", + "address": "765 Cameron Court, Ferney, Florida, 1963", + "about": "Duis irure ea qui ut velit nostrud. Lorem laborum excepteur do qui ad sit culpa. Labore mollit mollit deserunt sint aute officia qui laboris dolor aliqua magna in officia.\r\n", + "registered": "Sunday, April 27, 2014 11:55 AM", + "latitude": 84.200593, + "longitude": -155.377179, + "tags": [ + "laborum", + "labore", + "aliquip", + "ex", + "voluptate", + "dolor", + "Lorem" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Marva Cash" + }, + { + "id": 1, + "name": "Aimee Velez" + }, + { + "id": 2, + "name": "Eaton Delgado" + } + ], + "greeting": "Hello, Kendra! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82119105322ba52b407", + "index": 11, + "guid": "b6862230-95da-43d5-97ba-13ed4bcc0744", + "isActive": false, + "balance": "$1,442.51", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "brown", + "name": { + "first": "Katrina", + "last": "Ferguson" + }, + "company": "MANTRIX", + "email": "katrina.ferguson@mantrix.io", + "phone": "+1 (938) 541-3037", + "address": "447 Putnam Avenue, Collins, Georgia, 8421", + "about": "Minim aliquip Lorem fugiat et fugiat esse aliqua consectetur non officia esse. Fugiat irure eu ut irure cillum mollit nisi consequat do cillum. Est exercitation deserunt proident ex cupidatat. Elit aliquip pariatur ad minim adipisicing qui. Quis enim laborum incididunt eiusmod deserunt cillum amet enim. Proident et do voluptate esse laboris nisi. 
+ [Auto-generated JSON test fixture: an array of synthetic user records (fields: _id, index, guid, isActive, balance, picture, age, eyeColor, name, company, email, phone, address, about, registered, latitude, longitude, tags, range, friends, greeting, favoriteFruit), records indexed roughly 11–67, truncated at both ends. Full fixture contents omitted here; they carry no information about the blob-cache change itself.]
Est sunt deserunt incididunt exercitation eu aliqua qui elit labore id in eu ex.\r\n", + "registered": "Tuesday, August 26, 2014 11:34 PM", + "latitude": -67.974417, + "longitude": 97.189082, + "tags": [ + "sit", + "ex", + "ullamco", + "exercitation", + "adipisicing", + "non", + "laboris" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Alissa Ramsey" + }, + { + "id": 1, + "name": "Adela Bell" + }, + { + "id": 2, + "name": "Pearl Henderson" + } + ], + "greeting": "Hello, Perry! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8219a0b8e44380bd954", + "index": 68, + "guid": "4aa2bec3-3eaa-464f-9577-27f6c65e64b7", + "isActive": false, + "balance": "$3,911.59", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Barbra", + "last": "Mejia" + }, + "company": "ANIXANG", + "email": "barbra.mejia@anixang.ca", + "phone": "+1 (987) 517-2550", + "address": "635 Franklin Street, Allensworth, Florida, 1895", + "about": "Aliqua tempor excepteur velit do exercitation laborum commodo laboris aliqua nostrud. Aute aliquip nisi nulla labore id veniam ad voluptate non eiusmod minim mollit. Minim incididunt nostrud sint ex.\r\n", + "registered": "Thursday, January 16, 2014 11:04 PM", + "latitude": 44.320545, + "longitude": -61.392889, + "tags": [ + "cupidatat", + "excepteur", + "eu", + "consectetur", + "fugiat", + "aliquip", + "deserunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Potts Church" + }, + { + "id": 1, + "name": "Dianna Valentine" + }, + { + "id": 2, + "name": "Valeria Whitney" + } + ], + "greeting": "Hello, Barbra! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821b618d050a076972c", + "index": 69, + "guid": "ef47a627-c517-4f5a-931b-d67c8a18614b", + "isActive": false, + "balance": "$1,771.53", + "picture": "http://placehold.it/32x32", + "age": 32, + "eyeColor": "brown", + "name": { + "first": "Gonzales", + "last": "Walker" + }, + "company": "EVIDENDS", + "email": "gonzales.walker@evidends.org", + "phone": "+1 (984) 510-3347", + "address": "336 Pershing Loop, Croom, Georgia, 9128", + "about": "Quis reprehenderit consectetur ad aliqua ad amet incididunt aute irure Lorem veniam. Consequat consequat ut reprehenderit officia cupidatat irure aliqua nostrud veniam velit aliquip magna elit. Ullamco amet nostrud est cupidatat adipisicing fugiat magna anim eu occaecat incididunt. Ut ex ut quis veniam nulla ad ea magna elit incididunt Lorem anim ipsum elit. Aliqua laborum officia magna aliqua. Est quis adipisicing cillum cupidatat sunt velit fugiat mollit exercitation cupidatat sit.\r\n", + "registered": "Saturday, June 21, 2014 9:02 PM", + "latitude": -64.407501, + "longitude": -157.742045, + "tags": [ + "pariatur", + "minim", + "consectetur", + "consequat", + "ad", + "aliqua", + "ut" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tina Patterson" + }, + { + "id": 1, + "name": "Gabriela Nielsen" + }, + { + "id": 2, + "name": "Amalia Mueller" + } + ], + "greeting": "Hello, Gonzales! 
You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8210426ced41d67a58b", + "index": 70, + "guid": "5d045ae8-c32c-43f6-b404-30a943205f5e", + "isActive": true, + "balance": "$2,054.02", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "green", + "name": { + "first": "Clarissa", + "last": "Madden" + }, + "company": "NIQUENT", + "email": "clarissa.madden@niquent.biz", + "phone": "+1 (910) 480-3769", + "address": "637 Scholes Street, Needmore, Alabama, 9344", + "about": "Sunt nulla ad aliquip incididunt ullamco culpa laboris. Consectetur non enim in officia incididunt deserunt. Quis est consequat ipsum ad. Pariatur nostrud voluptate magna occaecat minim irure sint nostrud voluptate ea labore ullamco quis. Mollit veniam consequat commodo sunt.\r\n", + "registered": "Wednesday, January 8, 2014 4:02 AM", + "latitude": 46.115788, + "longitude": 79.731859, + "tags": [ + "reprehenderit", + "nisi", + "id", + "consectetur", + "sunt", + "nostrud", + "laboris" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mandy Buckner" + }, + { + "id": 1, + "name": "Hickman Brown" + }, + { + "id": 2, + "name": "Kemp Mclaughlin" + } + ], + "greeting": "Hello, Clarissa! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8211049b550404f3ce5", + "index": 71, + "guid": "2f9a6201-6728-4509-8d88-c0a614649311", + "isActive": true, + "balance": "$1,324.10", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Joyce", + "last": "Callahan" + }, + "company": "OPTYK", + "email": "joyce.callahan@optyk.io", + "phone": "+1 (893) 544-2327", + "address": "567 Crystal Street, Freetown, District Of Columbia, 8319", + "about": "Deserunt in nisi id consequat qui. Sunt velit proident id culpa incididunt velit aute dolore labore. Deserunt qui ea adipisicing cillum irure sit sunt excepteur quis et quis nulla dolore pariatur. Consequat ut et veniam dolor velit nulla veniam fugiat commodo velit fugiat ad veniam ad. Anim consequat labore deserunt eiusmod esse. Laborum labore eu et incididunt commodo dolore eiusmod occaecat. Nisi elit duis mollit cillum id enim.\r\n", + "registered": "Tuesday, February 4, 2014 2:38 AM", + "latitude": -76.437449, + "longitude": -169.66079, + "tags": [ + "ullamco", + "non", + "officia", + "eiusmod", + "duis", + "cupidatat", + "mollit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hendricks Logan" + }, + { + "id": 1, + "name": "Dolly Baird" + }, + { + "id": 2, + "name": "Wendi Wallace" + } + ], + "greeting": "Hello, Joyce! You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82176dab97e42086b90", + "index": 72, + "guid": "9680920d-9303-471b-849e-c30e38e06d45", + "isActive": false, + "balance": "$2,696.40", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Felecia", + "last": "Gonzalez" + }, + "company": "CORIANDER", + "email": "felecia.gonzalez@coriander.net", + "phone": "+1 (923) 575-3582", + "address": "315 Borinquen Pl, Rosburg, Pennsylvania, 9619", + "about": "Laboris officia exercitation duis aliqua in sint consectetur. Ad ut labore ipsum ipsum ut culpa labore irure aute. Et exercitation tempor do dolor culpa ad ipsum pariatur adipisicing fugiat. 
Incididunt amet incididunt minim quis cupidatat pariatur enim fugiat reprehenderit est ipsum labore.\r\n", + "registered": "Monday, January 20, 2014 11:59 PM", + "latitude": -36.201421, + "longitude": 162.994705, + "tags": [ + "cillum", + "reprehenderit", + "non", + "est", + "tempor", + "exercitation", + "fugiat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Gould Waters" + }, + { + "id": 1, + "name": "Ramona Coffey" + }, + { + "id": 2, + "name": "Taylor Byers" + } + ], + "greeting": "Hello, Felecia! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821df55849ccd4ca74c", + "index": 73, + "guid": "01b7c49a-6dac-4b65-906b-4483de07a5e8", + "isActive": true, + "balance": "$2,037.32", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "blue", + "name": { + "first": "Stanton", + "last": "Rutledge" + }, + "company": "EXOBLUE", + "email": "stanton.rutledge@exoblue.com", + "phone": "+1 (817) 408-2566", + "address": "741 Crooke Avenue, Newry, Nevada, 8555", + "about": "Commodo adipisicing ea ipsum non irure quis excepteur. Qui laborum qui sit cupidatat dolore consectetur tempor esse occaecat cillum qui. Dolore in amet ea ex proident do nulla.\r\n", + "registered": "Thursday, August 28, 2014 8:35 PM", + "latitude": 87.299409, + "longitude": 22.167535, + "tags": [ + "commodo", + "aliqua", + "irure", + "ea", + "dolor", + "aute", + "non" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Best Klein" + }, + { + "id": 1, + "name": "Dickerson Mcknight" + }, + { + "id": 2, + "name": "Gayle Washington" + } + ], + "greeting": "Hello, Stanton! You have 6 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821f3516c63c6fa05e8", + "index": 74, + "guid": "c299ecf4-0528-4280-b6b4-909801b1e9dd", + "isActive": true, + "balance": "$1,695.83", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "green", + "name": { + "first": "Thelma", + "last": "Barnett" + }, + "company": "DUOFLEX", + "email": "thelma.barnett@duoflex.biz", + "phone": "+1 (947) 416-2234", + "address": "325 Tampa Court, Zortman, Illinois, 3609", + "about": "Exercitation esse culpa enim anim. Cillum voluptate quis tempor excepteur elit aliquip consequat officia cupidatat laborum ad cupidatat. Mollit exercitation esse fugiat do do id irure tempor et duis. Ut sit reprehenderit velit sit eiusmod in officia nisi commodo magna id in. Lorem sint velit adipisicing aute.\r\n", + "registered": "Tuesday, April 22, 2014 5:11 AM", + "latitude": 20.61329, + "longitude": 48.592063, + "tags": [ + "et", + "excepteur", + "nostrud", + "ullamco", + "quis", + "occaecat", + "in" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Amy Parks" + }, + { + "id": 1, + "name": "Greene Nunez" + }, + { + "id": 2, + "name": "Janna Roth" + } + ], + "greeting": "Hello, Thelma! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821637950981c0fa4c8", + "index": 75, + "guid": "bc58930c-58d8-4223-8a98-20295ac61c4e", + "isActive": true, + "balance": "$1,836.33", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "green", + "name": { + "first": "Nunez", + "last": "Freeman" + }, + "company": "ZILCH", + "email": "nunez.freeman@zilch.co.uk", + "phone": "+1 (959) 439-2497", + "address": "729 Varick Street, Marne, Tennessee, 1687", + "about": "Sunt nulla ipsum non in nulla. 
Dolore in fugiat in laborum veniam enim dolore cupidatat. Elit tempor ullamco id in minim excepteur et aute ut mollit aliquip qui consequat. Duis esse magna culpa aliqua ad ipsum deserunt laborum amet sint. Quis voluptate quis quis aute.\r\n", + "registered": "Monday, August 11, 2014 5:27 PM", + "latitude": 85.57657, + "longitude": -145.772132, + "tags": [ + "ullamco", + "tempor", + "et", + "non", + "magna", + "non", + "ex" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Latasha Randolph" + }, + { + "id": 1, + "name": "Neva Porter" + }, + { + "id": 2, + "name": "Drake Nicholson" + } + ], + "greeting": "Hello, Nunez! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821ed9d29afc180b718", + "index": 76, + "guid": "45fb73c9-30e9-4e61-b78d-41c8f79a282e", + "isActive": false, + "balance": "$1,298.91", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "green", + "name": { + "first": "Bentley", + "last": "Reyes" + }, + "company": "ZYPLE", + "email": "bentley.reyes@zyple.tv", + "phone": "+1 (919) 510-3585", + "address": "227 Oakland Place, Farmers, Iowa, 9827", + "about": "Cillum proident eiusmod id amet anim laboris elit sint ea et non. Aliqua et reprehenderit amet est ea fugiat aute. Minim aute aliquip nulla elit. Duis ad exercitation excepteur laborum anim occaecat nulla sunt. Quis pariatur nulla Lorem consectetur proident sunt amet est et elit eu sunt. Ut irure voluptate consequat amet sint deserunt quis. Incididunt ea culpa commodo fugiat qui veniam quis Lorem incididunt dolor.\r\n", + "registered": "Wednesday, April 9, 2014 1:19 AM", + "latitude": -82.587336, + "longitude": -74.931056, + "tags": [ + "dolore", + "nisi", + "exercitation", + "ullamco", + "excepteur", + "qui", + "ut" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nadia Pearson" + }, + { + "id": 1, + "name": "Reba Frost" + }, + { + "id": 2, + "name": "Lilia Mcbride" + } + ], + "greeting": "Hello, Bentley! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821a936a115315db0b9", + "index": 77, + "guid": "a1b82517-f6a6-437a-9f2d-2c0043591cfa", + "isActive": false, + "balance": "$1,016.00", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Morales", + "last": "Shields" + }, + "company": "CORECOM", + "email": "morales.shields@corecom.name", + "phone": "+1 (814) 407-2079", + "address": "472 Coleridge Street, Shasta, Massachusetts, 501", + "about": "Ipsum elit adipisicing nostrud quis ut nisi ullamco consectetur ex laborum reprehenderit anim magna fugiat. In nulla dolor esse exercitation elit exercitation. Laborum sit esse velit magna ea irure est ut velit id nisi sint qui. Laborum voluptate amet cupidatat laborum aute in id duis irure. Enim aliqua enim magna ut consequat. Tempor est commodo elit eu et ut occaecat culpa ex ex. Ullamco cillum sint ipsum tempor anim ullamco sit pariatur excepteur dolor mollit ad quis.\r\n", + "registered": "Saturday, July 5, 2014 5:10 AM", + "latitude": -20.583102, + "longitude": -23.34973, + "tags": [ + "nulla", + "proident", + "enim", + "dolor", + "elit", + "excepteur", + "dolore" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lacey Perry" + }, + { + "id": 1, + "name": "Stokes Foley" + }, + { + "id": 2, + "name": "Trisha Morales" + } + ], + "greeting": "Hello, Morales! 
You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8214ceb22176ac554f9", + "index": 78, + "guid": "1aa9258a-f508-4e73-adb9-5d2cce0dff79", + "isActive": true, + "balance": "$1,100.42", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "blue", + "name": { + "first": "Chandra", + "last": "Patel" + }, + "company": "PROVIDCO", + "email": "chandra.patel@providco.me", + "phone": "+1 (875) 430-3869", + "address": "543 Bainbridge Street, Kidder, Arkansas, 3624", + "about": "Magna ad tempor commodo esse labore mollit sit. Duis laborum excepteur dolor officia exercitation. Elit sunt ex voluptate anim consectetur in ullamco mollit qui non. Cupidatat ipsum et cupidatat cupidatat consequat nulla Lorem culpa minim dolore cupidatat ipsum sint.\r\n", + "registered": "Sunday, June 29, 2014 1:41 AM", + "latitude": 55.150075, + "longitude": -26.185265, + "tags": [ + "amet", + "consectetur", + "occaecat", + "cillum", + "enim", + "ut", + "occaecat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Geneva Snider" + }, + { + "id": 1, + "name": "Rose Michael" + }, + { + "id": 2, + "name": "Kirkland Mason" + } + ], + "greeting": "Hello, Chandra! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821050884ba49ad868d", + "index": 79, + "guid": "88a0d238-e3c9-4b1a-8d19-08c622f9eaae", + "isActive": false, + "balance": "$1,263.98", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Elsa", + "last": "Chambers" + }, + "company": "VICON", + "email": "elsa.chambers@vicon.us", + "phone": "+1 (940) 436-3956", + "address": "190 Albemarle Terrace, Sheatown, Hawaii, 2654", + "about": "Est do esse elit consectetur elit. Aliqua esse duis est sint non. Enim minim laborum ad duis. Proident laboris quis ea amet nulla occaecat ex laboris duis ut velit.\r\n", + "registered": "Saturday, April 19, 2014 3:28 AM", + "latitude": 62.679122, + "longitude": 95.229313, + "tags": [ + "magna", + "ipsum", + "Lorem", + "ut", + "duis", + "elit", + "sit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Laurie Fuentes" + }, + { + "id": 1, + "name": "Juana Blevins" + }, + { + "id": 2, + "name": "Virginia Hester" + } + ], + "greeting": "Hello, Elsa! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821eb0dd28cfdc525a9", + "index": 80, + "guid": "e1574006-8d49-4399-8b59-92eaa0ed2be1", + "isActive": true, + "balance": "$2,395.40", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "brown", + "name": { + "first": "Morris", + "last": "Trujillo" + }, + "company": "ZOGAK", + "email": "morris.trujillo@zogak.ca", + "phone": "+1 (919) 553-3453", + "address": "127 Clove Road, Keyport, Puerto Rico, 5969", + "about": "Minim do veniam non elit excepteur enim sunt excepteur non amet. Duis eu consequat adipisicing nostrud ad cupidatat tempor occaecat. Ea id consectetur non anim. Irure dolor qui excepteur anim excepteur adipisicing ea sint id aliquip consequat eu id. Ea exercitation officia nostrud ipsum. Voluptate sunt irure aliqua tempor pariatur irure labore ea amet. 
Quis reprehenderit aliqua pariatur esse id anim laborum ullamco amet.\r\n", + "registered": "Sunday, February 2, 2014 12:52 PM", + "latitude": -86.963921, + "longitude": -157.636932, + "tags": [ + "occaecat", + "amet", + "ex", + "ea", + "exercitation", + "aliqua", + "elit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lester Watkins" + }, + { + "id": 1, + "name": "Kellie Clayton" + }, + { + "id": 2, + "name": "Valencia Edwards" + } + ], + "greeting": "Hello, Morris! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821f5f0ae13a893ce36", + "index": 81, + "guid": "9dd75e1e-8f3c-473e-800e-518254719ca1", + "isActive": true, + "balance": "$1,956.52", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "brown", + "name": { + "first": "Gwen", + "last": "Park" + }, + "company": "INTERGEEK", + "email": "gwen.park@intergeek.org", + "phone": "+1 (830) 581-3561", + "address": "514 Beayer Place, Maxville, New Hampshire, 4162", + "about": "Consequat labore commodo nulla veniam aliqua. Tempor ipsum officia exercitation amet elit dolor labore eu voluptate cillum reprehenderit exercitation proident. Id cillum laborum cupidatat reprehenderit anim cillum exercitation culpa aliqua deserunt cupidatat. In proident ea eu nisi est enim. Quis dolor sit aliquip reprehenderit in id ipsum proident duis. Sit eu sint nisi velit. Minim elit nostrud aliquip anim.\r\n", + "registered": "Wednesday, September 17, 2014 9:38 AM", + "latitude": -66.199245, + "longitude": -52.824656, + "tags": [ + "ex", + "magna", + "incididunt", + "veniam", + "mollit", + "eu", + "sunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nieves Potter" + }, + { + "id": 1, + "name": "Welch Reeves" + }, + { + "id": 2, + "name": "Hardy Forbes" + } + ], + "greeting": "Hello, Gwen! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a20d620a7ec793ac", + "index": 82, + "guid": "1ae142ff-8d6a-4e72-a3f1-3ac4349ad0b7", + "isActive": false, + "balance": "$2,560.11", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Erickson", + "last": "Lancaster" + }, + "company": "SCENTRIC", + "email": "erickson.lancaster@scentric.biz", + "phone": "+1 (980) 465-3465", + "address": "412 Jackson Street, Chumuckla, South Carolina, 3216", + "about": "Id laborum consectetur pariatur non nulla incididunt labore magna minim duis. Ipsum laboris deserunt velit sunt voluptate. Laboris ipsum duis aliquip non aliqua. Commodo veniam mollit voluptate elit nisi nostrud laboris dolor tempor pariatur duis laborum.\r\n", + "registered": "Wednesday, May 7, 2014 8:50 PM", + "latitude": -57.590749, + "longitude": 117.31662, + "tags": [ + "ad", + "elit", + "non", + "fugiat", + "laborum", + "incididunt", + "enim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Reeves Nguyen" + }, + { + "id": 1, + "name": "Jo Christian" + }, + { + "id": 2, + "name": "Myers Lowe" + } + ], + "greeting": "Hello, Erickson! 
You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8213747fd3443999762", + "index": 83, + "guid": "ec870637-4e65-49c4-abfb-7b99c2d2f094", + "isActive": true, + "balance": "$2,079.21", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "blue", + "name": { + "first": "Saundra", + "last": "Kemp" + }, + "company": "LUNCHPOD", + "email": "saundra.kemp@lunchpod.io", + "phone": "+1 (950) 433-2550", + "address": "593 Stuart Street, Edenburg, Texas, 6251", + "about": "Ipsum proident et duis reprehenderit in minim in sint dolore enim aute excepteur cillum eiusmod. Do nulla deserunt quis adipisicing sunt reprehenderit. Dolor pariatur tempor sint ullamco.\r\n", + "registered": "Tuesday, June 24, 2014 6:44 AM", + "latitude": 81.565149, + "longitude": -92.061448, + "tags": [ + "magna", + "commodo", + "esse", + "ad", + "labore", + "amet", + "mollit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tommie Mays" + }, + { + "id": 1, + "name": "Lou Hubbard" + }, + { + "id": 2, + "name": "Jenna Gentry" + } + ], + "greeting": "Hello, Saundra! You have 9 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821524a5fd2a5f037a2", + "index": 84, + "guid": "e7894fe9-efd4-44ae-afd5-49bfde09b000", + "isActive": true, + "balance": "$2,320.08", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "green", + "name": { + "first": "Christensen", + "last": "Wolf" + }, + "company": "GLUKGLUK", + "email": "christensen.wolf@glukgluk.net", + "phone": "+1 (937) 519-3107", + "address": "460 Porter Avenue, Irwin, South Dakota, 2498", + "about": "Ullamco reprehenderit non aliquip amet do deserunt nostrud ea deserunt fugiat. Commodo pariatur est officia dolor dolore in excepteur pariatur laborum ut. Occaecat cillum ullamco nulla eu esse non nisi pariatur ipsum amet do ea ad culpa. Excepteur esse elit laborum deserunt ad consequat. Culpa esse esse nostrud commodo laborum officia sint ea mollit. Dolor culpa pariatur fugiat cillum quis proident non enim esse pariatur duis adipisicing amet. Consequat minim aliqua enim excepteur.\r\n", + "registered": "Saturday, February 8, 2014 10:20 PM", + "latitude": 22.478522, + "longitude": 30.070646, + "tags": [ + "quis", + "ad", + "adipisicing", + "exercitation", + "non", + "eu", + "anim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bright Moon" + }, + { + "id": 1, + "name": "Stephenson Sears" + }, + { + "id": 2, + "name": "Fletcher Swanson" + } + ], + "greeting": "Hello, Christensen! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82160fa0826e90b747b", + "index": 85, + "guid": "af08cc83-e8c9-4bcd-84b3-2f3251cf9a02", + "isActive": true, + "balance": "$2,575.60", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "brown", + "name": { + "first": "Weaver", + "last": "Parker" + }, + "company": "RETROTEX", + "email": "weaver.parker@retrotex.com", + "phone": "+1 (951) 411-2545", + "address": "560 Madison Place, Sidman, Ohio, 5153", + "about": "Reprehenderit esse dolor tempor consectetur occaecat exercitation consectetur irure commodo. Et eiusmod aute ipsum eu commodo qui id anim proident. Excepteur labore laboris aliqua incididunt ut nisi consequat deserunt dolore officia velit sint consectetur. Laboris sint minim mollit duis. Excepteur nostrud incididunt aute consequat ad magna eu quis. 
Id dolor eu aliqua deserunt cillum sint ea et nostrud. Lorem amet cillum nulla tempor commodo mollit aliquip do est enim enim.\r\n", + "registered": "Thursday, February 6, 2014 12:55 PM", + "latitude": 87.778566, + "longitude": -122.687026, + "tags": [ + "eu", + "voluptate", + "commodo", + "magna", + "ullamco", + "nulla", + "ea" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Ratliff Mckinney" + }, + { + "id": 1, + "name": "Walker Frye" + }, + { + "id": 2, + "name": "Mcgowan Daniel" + } + ], + "greeting": "Hello, Weaver! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8217a543d20bd4b10c1", + "index": 86, + "guid": "5b670ab7-4ee6-4b8e-b379-4e1849b6e329", + "isActive": false, + "balance": "$3,114.39", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Annabelle", + "last": "Sanders" + }, + "company": "PHORMULA", + "email": "annabelle.sanders@phormula.biz", + "phone": "+1 (982) 489-2678", + "address": "970 Llama Court, Moraida, Maine, 8694", + "about": "Nostrud adipisicing magna nulla magna voluptate duis eu voluptate cupidatat ut dolore excepteur esse dolor. Aliquip exercitation occaecat amet excepteur sit. Velit adipisicing esse labore veniam duis ullamco in ea. Adipisicing eiusmod cillum veniam nostrud sint laboris sit id officia. Esse esse anim sint do ea id. Esse ipsum mollit sit laborum nostrud mollit nulla id.\r\n", + "registered": "Tuesday, January 7, 2014 7:34 AM", + "latitude": 9.515348, + "longitude": -99.138606, + "tags": [ + "ipsum", + "sint", + "dolor", + "laborum", + "est", + "consequat", + "magna" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Juliet Clements" + }, + { + "id": 1, + "name": "Jeannine Pruitt" + }, + { + "id": 2, + "name": "Chambers Warren" + } + ], + "greeting": "Hello, Annabelle! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82179c1fe89e1ccec4d", + "index": 87, + "guid": "31ecaae4-6443-4c00-a20d-82100c49b488", + "isActive": true, + "balance": "$3,803.83", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Kelley", + "last": "Miles" + }, + "company": "AQUASURE", + "email": "kelley.miles@aquasure.co.uk", + "phone": "+1 (819) 529-2967", + "address": "680 Monument Walk, Wakulla, Vermont, 8903", + "about": "Ea sint dolor nostrud dolor id commodo esse nisi. Reprehenderit minim dolore nostrud sint incididunt excepteur reprehenderit enim velit velit. Proident officia velit Lorem dolore ullamco occaecat.\r\n", + "registered": "Saturday, May 3, 2014 12:23 AM", + "latitude": 73.767872, + "longitude": -118.631186, + "tags": [ + "consectetur", + "irure", + "nostrud", + "nostrud", + "aliquip", + "quis", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Maynard Townsend" + }, + { + "id": 1, + "name": "Carlene Molina" + }, + { + "id": 2, + "name": "Mai Bentley" + } + ], + "greeting": "Hello, Kelley! 
You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8218de20c1f1e1e93fa", + "index": 88, + "guid": "8f6b0aac-f2ba-45aa-a7c8-76c413bdeb7a", + "isActive": true, + "balance": "$1,898.86", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Mckay", + "last": "Velasquez" + }, + "company": "NORALEX", + "email": "mckay.velasquez@noralex.tv", + "phone": "+1 (973) 599-3463", + "address": "152 Roebling Street, Mathews, Delaware, 7958", + "about": "Nostrud esse dolor excepteur cillum aliqua. Ea nulla elit minim sint non culpa id. Et ullamco aute laborum incididunt sint quis. Tempor tempor aliqua in sunt. Minim elit dolor quis excepteur exercitation adipisicing. Pariatur incididunt tempor irure proident exercitation deserunt sint.\r\n", + "registered": "Monday, August 4, 2014 5:00 AM", + "latitude": 81.463276, + "longitude": -66.291508, + "tags": [ + "nisi", + "anim", + "qui", + "est", + "qui", + "ipsum", + "ullamco" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Myrna Rollins" + }, + { + "id": 1, + "name": "Tricia Gilliam" + }, + { + "id": 2, + "name": "Collins Obrien" + } + ], + "greeting": "Hello, Mckay! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821b3490524365bc954", + "index": 89, + "guid": "43825015-fed6-40c8-bd80-8aaf228067f9", + "isActive": false, + "balance": "$3,035.21", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Brandy", + "last": "Hayden" + }, + "company": "OMNIGOG", + "email": "brandy.hayden@omnigog.name", + "phone": "+1 (827) 481-2334", + "address": "537 Pioneer Street, Saticoy, Palau, 803", + "about": "Ea velit consectetur ipsum ut elit pariatur id labore sunt eu incididunt est aliqua. Veniam esse officia do non officia cupidatat proident id officia esse tempor non mollit dolore. Elit voluptate nulla exercitation laboris ex ad irure est do enim aute velit aute. Cillum adipisicing nisi dolor velit duis ad fugiat deserunt non commodo Lorem fugiat sint qui. Aute consectetur magna incididunt tempor in esse consectetur magna qui sit. Culpa consequat laborum duis adipisicing dolor in deserunt ut velit ea ex dolore ullamco esse.\r\n", + "registered": "Wednesday, February 12, 2014 7:32 PM", + "latitude": 4.319892, + "longitude": 81.048442, + "tags": [ + "labore", + "ullamco", + "commodo", + "quis", + "aute", + "nulla", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Riggs Flynn" + }, + { + "id": 1, + "name": "Kidd Guerrero" + }, + { + "id": 2, + "name": "Rosario Wade" + } + ], + "greeting": "Hello, Brandy! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8211b737d086264c9a9", + "index": 90, + "guid": "ec5fe190-d16f-4728-bce1-c5140852c583", + "isActive": true, + "balance": "$2,932.98", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "green", + "name": { + "first": "Sonia", + "last": "Orr" + }, + "company": "MUSANPOLY", + "email": "sonia.orr@musanpoly.me", + "phone": "+1 (822) 422-2010", + "address": "908 Dekalb Avenue, Elfrida, Arizona, 6925", + "about": "Ut cillum ex irure amet aliquip voluptate Lorem fugiat reprehenderit sunt reprehenderit quis. Nulla laborum sunt elit ad labore ut cupidatat cillum eiusmod est. Ea irure amet excepteur mollit eu ipsum id adipisicing occaecat. 
Cupidatat sunt do veniam esse enim sint qui voluptate sint. Qui officia ad cupidatat mollit laboris. Duis tempor fugiat ea mollit cupidatat exercitation sunt incididunt.\r\n", + "registered": "Thursday, September 25, 2014 8:21 PM", + "latitude": 71.876999, + "longitude": 79.322401, + "tags": [ + "pariatur", + "sit", + "culpa", + "dolore", + "cupidatat", + "minim", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Morgan Pittman" + }, + { + "id": 1, + "name": "Bullock Cannon" + }, + { + "id": 2, + "name": "Lakeisha Lynch" + } + ], + "greeting": "Hello, Sonia! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821b271c92236e5e9b0", + "index": 91, + "guid": "57cff12c-2a18-48c2-b15a-3c393de707b6", + "isActive": false, + "balance": "$1,874.97", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "green", + "name": { + "first": "Stephens", + "last": "Whitaker" + }, + "company": "EARTHPLEX", + "email": "stephens.whitaker@earthplex.us", + "phone": "+1 (960) 419-2183", + "address": "368 Division Avenue, Fivepointville, Colorado, 8609", + "about": "Do aliquip laboris irure consectetur esse reprehenderit. Cillum ex deserunt fugiat ut dolore excepteur culpa eiusmod sit ullamco velit consequat consequat aliquip. Nostrud officia est enim velit fugiat laboris.\r\n", + "registered": "Monday, May 26, 2014 8:04 PM", + "latitude": 7.153481, + "longitude": 100.823578, + "tags": [ + "laborum", + "irure", + "dolore", + "enim", + "nisi", + "cillum", + "deserunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Perkins Campos" + }, + { + "id": 1, + "name": "Murray Randall" + }, + { + "id": 2, + "name": "Wallace Blackwell" + } + ], + "greeting": "Hello, Stephens! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821c115ee94a728efa4", + "index": 92, + "guid": "7bbe7ace-73f9-4d25-8f20-b50f4b9d4e60", + "isActive": true, + "balance": "$1,168.18", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Shepard", + "last": "Sanchez" + }, + "company": "YOGASM", + "email": "shepard.sanchez@yogasm.ca", + "phone": "+1 (959) 436-3299", + "address": "154 Seeley Street, Witmer, Missouri, 994", + "about": "Adipisicing ut id nulla occaecat enim officia reprehenderit non magna dolor Lorem. Culpa ad proident duis cupidatat nostrud occaecat esse elit pariatur quis Lorem. Velit exercitation anim dolore nisi labore consequat cupidatat magna nostrud sint ut deserunt enim.\r\n", + "registered": "Saturday, February 22, 2014 10:23 PM", + "latitude": -81.429217, + "longitude": -27.374426, + "tags": [ + "aliqua", + "minim", + "consequat", + "aliqua", + "qui", + "proident", + "consequat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Sara Bruce" + }, + { + "id": 1, + "name": "Kimberley Mcdaniel" + }, + { + "id": 2, + "name": "Tammi England" + } + ], + "greeting": "Hello, Shepard! 
You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a094fdd2592addbd", + "index": 93, + "guid": "69bc8858-bd55-4fa0-95f9-d2cb680a390c", + "isActive": false, + "balance": "$1,746.96", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Kristy", + "last": "Johnston" + }, + "company": "KONGLE", + "email": "kristy.johnston@kongle.org", + "phone": "+1 (823) 558-3033", + "address": "122 Cadman Plaza, Suitland, Minnesota, 3918", + "about": "Laboris excepteur ea nostrud incididunt est laborum dolor. Consequat ad duis aute proident incididunt commodo adipisicing. Enim voluptate sunt et est excepteur eiusmod commodo. Mollit fugiat reprehenderit ex ullamco magna laboris commodo mollit. Cupidatat tempor tempor minim dolore. Excepteur ipsum esse ipsum nulla.\r\n", + "registered": "Wednesday, January 29, 2014 11:15 PM", + "latitude": 15.335329, + "longitude": -78.001472, + "tags": [ + "veniam", + "consequat", + "ex", + "anim", + "culpa", + "ullamco", + "ex" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Johns Cochran" + }, + { + "id": 1, + "name": "Sheppard Hicks" + }, + { + "id": 2, + "name": "Cervantes Donovan" + } + ], + "greeting": "Hello, Kristy! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821311b6242dc75f718", + "index": 94, + "guid": "9de76ab8-2ef7-4b46-a478-5dea8732650a", + "isActive": false, + "balance": "$2,549.05", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Melendez", + "last": "Golden" + }, + "company": "GRONK", + "email": "melendez.golden@gronk.biz", + "phone": "+1 (986) 553-2124", + "address": "647 Broadway , Reno, Northern Mariana Islands, 9487", + "about": "Aliquip pariatur deserunt culpa eu nostrud eu amet. Adipisicing elit occaecat aliqua ipsum ut. Cillum magna consectetur elit esse sint laboris duis. In ea enim aute eiusmod culpa. Labore quis sunt aliquip excepteur tempor irure amet consequat eu excepteur et occaecat.\r\n", + "registered": "Monday, April 14, 2014 9:41 AM", + "latitude": 55.489443, + "longitude": 3.620039, + "tags": [ + "pariatur", + "magna", + "quis", + "in", + "irure", + "amet", + "esse" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hilary Dennis" + }, + { + "id": 1, + "name": "Tyler Burgess" + }, + { + "id": 2, + "name": "Eugenia Donaldson" + } + ], + "greeting": "Hello, Melendez! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821c394a9871527c834", + "index": 95, + "guid": "ec0f31d4-7264-4649-9af9-2716082915d3", + "isActive": true, + "balance": "$2,810.24", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "brown", + "name": { + "first": "Eleanor", + "last": "Curtis" + }, + "company": "ZAPPIX", + "email": "eleanor.curtis@zappix.io", + "phone": "+1 (824) 461-3776", + "address": "981 Lyme Avenue, Oneida, Marshall Islands, 6015", + "about": "Commodo labore do pariatur exercitation voluptate ea velit velit qui ex duis. Commodo est excepteur ad labore aute enim eu. 
Adipisicing irure exercitation sint consectetur exercitation occaecat aute exercitation commodo.\r\n", + "registered": "Monday, May 5, 2014 7:51 AM", + "latitude": -5.604007, + "longitude": -125.532894, + "tags": [ + "aliqua", + "commodo", + "non", + "magna", + "quis", + "velit", + "irure" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Monica Carr" + }, + { + "id": 1, + "name": "Coleman Simpson" + }, + { + "id": 2, + "name": "Wilma Fisher" + } + ], + "greeting": "Hello, Eleanor! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821a5b5e35d24f265b3", + "index": 96, + "guid": "c865c0ff-8505-4d02-af0a-c5a275d03fd5", + "isActive": true, + "balance": "$3,192.24", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Terrell", + "last": "Greene" + }, + "company": "AUTOGRATE", + "email": "terrell.greene@autograte.net", + "phone": "+1 (933) 557-3825", + "address": "435 Bridgewater Street, Fresno, New Jersey, 8124", + "about": "Elit sit ut anim reprehenderit anim ipsum esse tempor aliqua id ullamco. Exercitation est velit aliquip est elit mollit magna velit. Elit eiusmod esse voluptate consectetur enim id exercitation adipisicing et laborum. Irure fugiat ut sint exercitation dolor nostrud qui mollit eiusmod cupidatat. Commodo ad dolore incididunt ex quis nostrud veniam pariatur aliquip ea reprehenderit reprehenderit.\r\n", + "registered": "Sunday, September 21, 2014 3:31 AM", + "latitude": 76.638623, + "longitude": 147.966829, + "tags": [ + "aliquip", + "qui", + "nisi", + "ut", + "non", + "proident", + "et" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lucile Jordan" + }, + { + "id": 1, + "name": "Bender Sheppard" + }, + { + "id": 2, + "name": "Milagros Francis" + } + ], + "greeting": "Hello, Terrell! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8216cc444257e522ec0", + "index": 97, + "guid": "90c19881-6521-4012-aba7-f4d143953965", + "isActive": false, + "balance": "$1,509.96", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Holman", + "last": "Hines" + }, + "company": "NORSUP", + "email": "holman.hines@norsup.com", + "phone": "+1 (925) 565-2825", + "address": "902 Monroe Street, Marienthal, Virgin Islands, 2354", + "about": "Eu cupidatat consectetur labore voluptate nulla non amet incididunt labore non magna tempor. Veniam et sint qui reprehenderit reprehenderit sint ut laboris elit. Pariatur in eu dolore culpa nisi laboris officia magna do velit. Cupidatat proident excepteur officia labore irure sunt elit velit dolor commodo. Minim quis sint minim non incididunt Lorem elit cupidatat adipisicing quis esse non sint et. Laborum culpa incididunt incididunt fugiat minim ex deserunt et.\r\n", + "registered": "Sunday, April 27, 2014 8:05 PM", + "latitude": 61.825518, + "longitude": -28.393161, + "tags": [ + "voluptate", + "veniam", + "Lorem", + "ex", + "in", + "est", + "exercitation" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Cecelia Chapman" + }, + { + "id": 1, + "name": "Isabella Castaneda" + }, + { + "id": 2, + "name": "Montoya Chen" + } + ], + "greeting": "Hello, Holman! 
You have 9 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210659aa5cdca41f7e", + "index": 98, + "guid": "81adf15d-24b6-454c-a75f-f5be12f6ac75", + "isActive": false, + "balance": "$2,564.95", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Horton", + "last": "Poole" + }, + "company": "GROK", + "email": "horton.poole@grok.biz", + "phone": "+1 (908) 520-3683", + "address": "141 Gatling Place, Hackneyville, Oregon, 2976", + "about": "Consequat nisi reprehenderit incididunt id minim cillum. Lorem reprehenderit fugiat irure dolor excepteur velit. Est nostrud culpa ut reprehenderit in duis id voluptate pariatur voluptate. Exercitation Lorem esse exercitation Lorem esse. Excepteur mollit sit ut voluptate ipsum pariatur anim sint sunt cillum sit consequat.\r\n", + "registered": "Thursday, September 18, 2014 6:11 AM", + "latitude": -3.525694, + "longitude": -103.351985, + "tags": [ + "deserunt", + "voluptate", + "cillum", + "id", + "magna", + "deserunt", + "incididunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hope Lara" + }, + { + "id": 1, + "name": "French Garner" + }, + { + "id": 2, + "name": "Stein Sykes" + } + ], + "greeting": "Hello, Horton! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821cf1b15f13d8c3938", + "index": 99, + "guid": "44533089-c11a-4656-b2d1-9ab6be887f30", + "isActive": false, + "balance": "$3,192.31", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "green", + "name": { + "first": "Wanda", + "last": "Wiggins" + }, + "company": "ZEPITOPE", + "email": "wanda.wiggins@zepitope.co.uk", + "phone": "+1 (998) 461-3780", + "address": "427 Canton Court, Heil, Utah, 8283", + "about": "Do officia et exercitation dolor esse. Ut nisi eiusmod dolore laborum ad ex mollit minim. Pariatur qui culpa ullamco ex eiusmod.\r\n", + "registered": "Tuesday, August 26, 2014 6:59 PM", + "latitude": 44.770809, + "longitude": 150.936963, + "tags": [ + "consectetur", + "mollit", + "laborum", + "ipsum", + "quis", + "cupidatat", + "in" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Kristie Cain" + }, + { + "id": 1, + "name": "Geraldine Zimmerman" + }, + { + "id": 2, + "name": "Ingrid Harper" + } + ], + "greeting": "Hello, Wanda! You have 5 unread messages.", + "favoriteFruit": "strawberry" + } +] \ No newline at end of file diff --git a/vendor/github.com/opencontainers/selinux/Makefile b/vendor/github.com/opencontainers/selinux/Makefile index 86672b61045..e7861439aa9 100644 --- a/vendor/github.com/opencontainers/selinux/Makefile +++ b/vendor/github.com/opencontainers/selinux/Makefile @@ -8,6 +8,7 @@ endif .PHONY: test test: check-gopath go test -timeout 3m -tags "${BUILDTAGS}" ${TESTFLAGS} -v ./... + go test -timeout 3m ${TESTFLAGS} -v ./... 
 
 .PHONY:
 lint:
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
index 2a31cd3c5cc..bb27ac9367b 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -9,7 +9,7 @@ func InitLabels(options []string) (string, string, error) {
 	return "", "", nil
 }
 
-func GetROMountLabel() string {
+func ROMountLabel() string {
 	return ""
 }
 
@@ -25,7 +25,19 @@ func SetProcessLabel(processLabel string) error {
 	return nil
 }
 
-func GetFileLabel(path string) (string, error) {
+func ProcessLabel() (string, error) {
+	return "", nil
+}
+
+func SetSocketLabel(processLabel string) error {
+	return nil
+}
+
+func SocketLabel() (string, error) {
+	return "", nil
+}
+
+func FileLabel(path string) (string, error) {
 	return "", nil
 }
 
@@ -41,7 +53,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
 	return nil
 }
 
-func GetPidLabel(pid int) (string, error) {
+func PidLabel(pid int) (string, error) {
 	return "", nil
 }
 
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
index 63c4edd05eb..de214b2d504 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -95,6 +95,17 @@ func SetProcessLabel(processLabel string) error {
 	return selinux.SetExecLabel(processLabel)
 }
 
+// SetSocketLabel takes a process label and tells the kernel to assign the
+// label to the next socket that gets created
+func SetSocketLabel(processLabel string) error {
+	return selinux.SetSocketLabel(processLabel)
+}
+
+// SocketLabel retrieves the current default socket label setting
+func SocketLabel() (string, error) {
+	return selinux.SocketLabel()
+}
+
 // ProcessLabel returns the process label that the kernel will assign
 // to the next program executed by the current process. If "" is returned
 // this indicates that the default labeling will happen for the process.
@@ -102,7 +113,7 @@ func ProcessLabel() (string, error) { return selinux.ExecLabel() } -// GetFileLabel returns the label for specified path +// FileLabel returns the label for specified path func FileLabel(path string) (string, error) { return selinux.FileLabel(path) } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux_stub_test.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux_stub_test.go new file mode 100644 index 00000000000..cf0156ccc2f --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux_stub_test.go @@ -0,0 +1,121 @@ +// +build !selinux !linux + +package label + +import ( + "os" + "testing" +) + +func TestInit(t *testing.T) { + var testNull []string + _, _, err := InitLabels(testNull) + if err != nil { + t.Log("InitLabels Failed") + t.Fatal(err) + } + testDisabled := []string{"disable"} + roMountLabel := ROMountLabel() + if roMountLabel != "" { + t.Errorf("ROMountLabel Failed") + } + plabel, _, err := InitLabels(testDisabled) + if err != nil { + t.Log("InitLabels Disabled Failed") + t.Fatal(err) + } + if plabel != "" { + t.Log("InitLabels Disabled Failed") + t.FailNow() + } + testUser := []string{"user:user_u", "role:user_r", "type:user_t", "level:s0:c1,c15"} + plabel, _, err = InitLabels(testUser) + if err != nil { + t.Log("InitLabels User Failed") + t.Fatal(err) + } +} + +func TestRelabel(t *testing.T) { + testdir := "/tmp/test" + if err := os.Mkdir(testdir, 0755); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testdir) + label := "system_u:object_r:container_file_t:s0:c1,c2" + if err := Relabel("/etc", label, false); err != nil { + t.Fatalf("Relabel /etc succeeded") + } +} + +func TestSocketLabel(t *testing.T) { + label := "system_u:object_r:container_t:s0:c1,c2" + if err := SetSocketLabel(label); err != nil { + t.Fatal(err) + } + if _, err := SocketLabel(); err != nil { + t.Fatal(err) + } +} + +func TestProcessLabel(t *testing.T) { + label := "system_u:object_r:container_t:s0:c1,c2" + if err := SetProcessLabel(label); err != nil { + t.Fatal(err) + } + if _, err := ProcessLabel(); err != nil { + t.Fatal(err) + } +} + +func CheckLabelCompile(t *testing.T) { + if _, _, err := GenLabels(""); err != nil { + t.Fatal(err) + } + if test := FormatMountLabel("", ""); test != "" { + t.Fatal("Format failed") + } + + if test := FormatMountLabel("", ""); test != "" { + t.Fatal("Format failed") + } + + if _, err := FileLabel("/etc"); err != nil { + t.Fatal(err) + } + + if err := SetFileLabel("/etc", "foobar"); err != nil { + t.Fatal(err) + } + + if err := SetFileCreateLabel("foobar"); err != nil { + t.Fatal(err) + } + + if _, err := PidLabel(0); err != nil { + t.Fatal(err) + } + + ClearLabels() + + if err := ReserveLabel("foobar"); err != nil { + t.Fatal(err) + } + + if err := ReleaseLabel("foobar"); err != nil { + t.Fatal(err) + } + + DupSecOpt("foobar") + DisableSecOpt() + + if err := Validate("foobar"); err != nil { + t.Fatal(err) + } + if relabel := RelabelNeeded("foobar"); relabel { + t.Fatal("Relabel failed") + } + if shared := IsShared("foobar"); shared { + t.Fatal("isshared failed") + } +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux_test.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux_test.go index c5e1c4c764b..bbb7aa529e4 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux_test.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux_test.go @@ -159,3 
+159,20 @@ func TestSELinuxNoLevel(t *testing.T) { t.Errorf("NewContaxt and con.Get() Failed on non mls label") } } + +func TestSocketLabel(t *testing.T) { + if !selinux.GetEnabled() { + return + } + label := "system_u:object_r:container_t:s0:c1,c2" + if err := selinux.SetSocketLabel(label); err != nil { + t.Fatal(err) + } + nlabel, err := selinux.SocketLabel() + if err != nil { + t.Fatal(err) + } + if label != nlabel { + t.Errorf("SocketLabel %s != %s", nlabel, label) + } +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index bbaa1e0d775..7832f749706 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -385,6 +385,17 @@ func SetExecLabel(label string) error { return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), label) } +// SetSocketLabel takes a process label and tells the kernel to assign the +// label to the next socket that gets created +func SetSocketLabel(label string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid()), label) +} + +// SocketLabel retrieves the current socket label setting +func SocketLabel() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid())) +} + // Get returns the Context as a string func (c Context) Get() string { if c["level"] != "" { diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 5abf8a36231..99efa155ad0 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -96,6 +96,19 @@ func SetExecLabel(label string) error { return nil } +/* +SetSocketLabel sets the SELinux label that the kernel will use for any programs +that are executed by the current process thread, or an error. 
+*/ +func SetSocketLabel(label string) error { + return nil +} + +// SocketLabel retrieves the current socket label setting +func SocketLabel() (string, error) { + return "", nil +} + // Get returns the Context as a string func (c Context) Get() string { return "" diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub_test.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub_test.go index f50b84953fb..692a3d47ea3 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub_test.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub_test.go @@ -10,4 +10,68 @@ func TestSELinux(t *testing.T) { if GetEnabled() { t.Fatal("SELinux enabled with build-tag !selinux.") } + + if _, err := FileLabel("/etc"); err != nil { + t.Fatal(err) + } + + if err := SetFileLabel("/etc", "foobar"); err != nil { + t.Fatal(err) + } + + if err := SetFSCreateLabel("foobar"); err != nil { + t.Fatal(err) + } + + if _, err := FSCreateLabel(); err != nil { + t.Fatal(err) + } + if _, err := CurrentLabel(); err != nil { + t.Fatal(err) + } + + if _, err := PidLabel(0); err != nil { + t.Fatal(err) + } + + ClearLabels() + + ReserveLabel("foobar") + ReleaseLabel("foobar") + DupSecOpt("foobar") + DisableSecOpt() + SetDisabled() + if enabled := GetEnabled(); enabled { + t.Fatal("Should not be enabled") + } + if err := SetExecLabel("foobar"); err != nil { + t.Fatal(err) + } + if _, err := ExecLabel(); err != nil { + t.Fatal(err) + } + if _, err := CanonicalizeContext("foobar"); err != nil { + t.Fatal(err) + } + if err := SetSocketLabel("foobar"); err != nil { + t.Fatal(err) + } + if _, err := SocketLabel(); err != nil { + t.Fatal(err) + } + con := NewContext("foobar") + con.Get() + if err := SetEnforceMode(1); err != nil { + t.Fatal(err) + } + DefaultEnforceMode() + EnforceMode() + ROFileLabel() + ContainerLabels() + if err := SecurityCheckContext("foobar"); err != nil { + t.Fatal(err) + } + if _, err := CopyLevel("foo", "bar"); err != nil { + t.Fatal(err) + } } diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000000..ac53e733e79 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,127 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. 
+func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: bad release") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go new file mode 100644 index 00000000000..f96d349e73b --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package semaphore_test + +import ( + "context" + "fmt" + "testing" + + "golang.org/x/sync/semaphore" +) + +// weighted is an interface matching a subset of *Weighted. It allows +// alternate implementations for testing and benchmarking. +type weighted interface { + Acquire(context.Context, int64) error + TryAcquire(int64) bool + Release(int64) +} + +// semChan implements Weighted using a channel for +// comparing against the condition variable-based implementation. 
+type semChan chan struct{} + +func newSemChan(n int64) semChan { + return semChan(make(chan struct{}, n)) +} + +func (s semChan) Acquire(_ context.Context, n int64) error { + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return nil +} + +func (s semChan) TryAcquire(n int64) bool { + if int64(len(s))+n > int64(cap(s)) { + return false + } + + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return true +} + +func (s semChan) Release(n int64) { + for i := int64(0); i < n; i++ { + <-s + } +} + +// acquireN calls Acquire(size) on sem N times and then calls Release(size) N times. +func acquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + sem.Acquire(context.Background(), size) + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +// tryAcquireN calls TryAcquire(size) on sem N times and then calls Release(size) N times. +func tryAcquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + if !sem.TryAcquire(size) { + b.Fatalf("TryAcquire(%v) = false, want true", size) + } + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +func BenchmarkNewSeq(b *testing.B) { + for _, cap := range []int64{1, 128} { + b.Run(fmt.Sprintf("Weighted-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = semaphore.NewWeighted(cap) + } + }) + b.Run(fmt.Sprintf("semChan-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = newSemChan(cap) + } + }) + } +} + +func BenchmarkAcquireSeq(b *testing.B) { + for _, c := range []struct { + cap, size int64 + N int + }{ + {1, 1, 1}, + {2, 1, 1}, + {16, 1, 1}, + {128, 1, 1}, + {2, 2, 1}, + {16, 2, 8}, + {128, 2, 64}, + {2, 1, 2}, + {16, 8, 2}, + {128, 64, 2}, + } { + for _, w := range []struct { + name string + w weighted + }{ + {"Weighted", semaphore.NewWeighted(c.cap)}, + {"semChan", newSemChan(c.cap)}, + } { + b.Run(fmt.Sprintf("%s-acquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + acquireN(b, w.w, c.size, c.N) + }) + b.Run(fmt.Sprintf("%s-tryAcquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + tryAcquireN(b, w.w, c.size, c.N) + }) + } + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go new file mode 100644 index 00000000000..e75cd79f5bc --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go @@ -0,0 +1,84 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package semaphore_test + +import ( + "context" + "fmt" + "log" + "runtime" + + "golang.org/x/sync/semaphore" +) + +// Example_workerPool demonstrates how to use a semaphore to limit the number of +// goroutines working on parallel tasks. +// +// This use of a semaphore mimics a typical “worker pool” pattern, but without +// the need to explicitly shut down idle workers when the work is done. +func Example_workerPool() { + ctx := context.TODO() + + var ( + maxWorkers = runtime.GOMAXPROCS(0) + sem = semaphore.NewWeighted(int64(maxWorkers)) + out = make([]int, 32) + ) + + // Compute the output using up to maxWorkers goroutines at a time. + for i := range out { + // When maxWorkers goroutines are in flight, Acquire blocks until one of the + // workers finishes. 
+ if err := sem.Acquire(ctx, 1); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + break + } + + go func(i int) { + defer sem.Release(1) + out[i] = collatzSteps(i + 1) + }(i) + } + + // Acquire all of the tokens to wait for any remaining workers to finish. + // + // If you are already waiting for the workers by some other means (such as an + // errgroup.Group), you can omit this final Acquire call. + if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + } + + fmt.Println(out) + + // Output: + // [0 1 7 2 5 8 16 3 19 6 14 9 9 17 17 4 12 20 20 7 7 15 15 10 23 10 111 18 18 18 106 5] +} + +// collatzSteps computes the number of steps to reach 1 under the Collatz +// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.) +func collatzSteps(n int) (steps int) { + if n <= 0 { + panic("nonpositive input") + } + + for ; n > 1; steps++ { + if steps < 0 { + panic("too many steps") + } + + if n%2 == 0 { + n /= 2 + continue + } + + const maxInt = int(^uint(0) >> 1) + if n > (maxInt-1)/3 { + panic("overflow") + } + n = 3*n + 1 + } + + return steps +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_test.go new file mode 100644 index 00000000000..b5f8f1338c8 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_test.go @@ -0,0 +1,171 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package semaphore_test + +import ( + "context" + "math/rand" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +const maxSleep = 1 * time.Millisecond + +func HammerWeighted(sem *semaphore.Weighted, n int64, loops int) { + for i := 0; i < loops; i++ { + sem.Acquire(context.Background(), n) + time.Sleep(time.Duration(rand.Int63n(int64(maxSleep/time.Nanosecond))) * time.Nanosecond) + sem.Release(n) + } +} + +func TestWeighted(t *testing.T) { + t.Parallel() + + n := runtime.GOMAXPROCS(0) + loops := 10000 / n + sem := semaphore.NewWeighted(int64(n)) + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + i := i + go func() { + defer wg.Done() + HammerWeighted(sem, int64(i), loops) + }() + } + wg.Wait() +} + +func TestWeightedPanic(t *testing.T) { + t.Parallel() + + defer func() { + if recover() == nil { + t.Fatal("release of an unacquired weighted semaphore did not panic") + } + }() + w := semaphore.NewWeighted(1) + w.Release(1) +} + +func TestWeightedTryAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tries := []bool{} + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + tries = append(tries, sem.TryAcquire(1)) + + sem.Release(2) + + tries = append(tries, sem.TryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tryAcquire := func(n int64) bool { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + return sem.Acquire(ctx, n) == nil + } + + tries := []bool{} + sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + tries = append(tries, tryAcquire(1)) + + 
sem.Release(2) + + tries = append(tries, tryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedDoesntBlockIfTooBig(t *testing.T) { + t.Parallel() + + const n = 2 + sem := semaphore.NewWeighted(n) + { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go sem.Acquire(ctx, n+1) + } + + g, ctx := errgroup.WithContext(context.Background()) + for i := n * 3; i > 0; i-- { + g.Go(func() error { + err := sem.Acquire(ctx, 1) + if err == nil { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + } + return err + }) + } + if err := g.Wait(); err != nil { + t.Errorf("semaphore.NewWeighted(%v) failed to AcquireCtx(_, 1) with AcquireCtx(_, %v) pending", n, n+1) + } +} + +// TestLargeAcquireDoesntStarve times out if a large call to Acquire starves. +// Merely returning from the test function indicates success. +func TestLargeAcquireDoesntStarve(t *testing.T) { + t.Parallel() + + ctx := context.Background() + n := int64(runtime.GOMAXPROCS(0)) + sem := semaphore.NewWeighted(n) + running := true + + var wg sync.WaitGroup + wg.Add(int(n)) + for i := n; i > 0; i-- { + sem.Acquire(ctx, 1) + go func() { + defer func() { + sem.Release(1) + wg.Done() + }() + for running { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + sem.Acquire(ctx, 1) + } + }() + } + + sem.Acquire(ctx, n) + running = false + sem.Release(n) + wg.Wait() +}
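
Reviewer note on the new socket-label API vendored above: SetSocketLabel/SocketLabel write and read /proc/self/task/<tid>/attr/sockcreate, so the label applies only to sockets created afterwards by the calling OS thread (and the stub build is a no-op). A minimal usage sketch, assuming the caller pins its goroutine to one OS thread around socket creation; the helper name, address, and error handling are illustrative and not part of this patch (the label string mirrors the one used in the vendored tests):

package main

import (
	"fmt"
	"net"
	"runtime"

	selinux "github.com/opencontainers/selinux/go-selinux"
)

// listenWithLabel creates a TCP listener whose socket carries the given
// SELinux label. Sketch only; not taken from this patch.
func listenWithLabel(label, addr string) (net.Listener, error) {
	// sockcreate is per-thread state (the vendored code uses Gettid()), so
	// keep this goroutine on one OS thread for the set/listen/reset sequence.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := selinux.SetSocketLabel(label); err != nil {
		return nil, fmt.Errorf("setting socket label: %v", err)
	}
	// Clear the per-thread setting so later sockets get the default label.
	defer selinux.SetSocketLabel("")

	return net.Listen("tcp", addr)
}

func main() {
	if !selinux.GetEnabled() {
		fmt.Println("SELinux disabled; the stub implementation is a no-op")
		return
	}
	ln, err := listenWithLabel("system_u:object_r:container_t:s0:c1,c2", "127.0.0.1:0")
	if err != nil {
		fmt.Println("listen:", err)
		return
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}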
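Reviewer note on the vendored semaphore package: the Release comment motivates its FIFO wakeup policy with a read-write-lock analogy, where readers Acquire(1) and a writer acquires the full weight, so a late-arriving reader queues behind a waiting writer instead of starving it. A self-contained sketch of that pattern against the vendored API; the token count, goroutine counts, and sleep durations are arbitrary illustration, not taken from this patch:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	const tokens = 4 // readers hold weight 1 each; the writer holds all 4
	ctx := context.Background()
	sem := semaphore.NewWeighted(tokens)

	var wg sync.WaitGroup

	// Readers: each holds one token while "reading".
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := sem.Acquire(ctx, 1); err != nil {
				return
			}
			defer sem.Release(1)
			time.Sleep(10 * time.Millisecond)
			fmt.Printf("reader %d done\n", i)
		}(i)
	}

	// Writer: acquiring the full weight excludes every reader. Because
	// Release wakes waiters in FIFO order and stops at the first one that
	// does not fit, readers arriving after the writer wait behind it.
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := sem.Acquire(ctx, tokens); err != nil {
			return
		}
		defer sem.Release(tokens)
		fmt.Println("writer has exclusive access")
	}()

	wg.Wait()
}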