diff --git a/go.mod b/go.mod index baee6988eb4..8a97bd52ff3 100644 --- a/go.mod +++ b/go.mod @@ -115,3 +115,5 @@ require ( k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 // indirect k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a // indirect ) + +replace github.com/containers/image => github.com/QiWang19/image v0.0.0-20190823164615-55d40423d88f diff --git a/go.sum b/go.sum index e837f0efc2e..7f4013de6b3 100644 --- a/go.sum +++ b/go.sum @@ -30,6 +30,16 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/QiWang19/image v0.0.0-20190820155639-cacda0d3194a h1:9VJKg/Y6Tulp6Jj0gcfiYtwkQQQBI3K/iNXkhQq2czQ= +github.com/QiWang19/image v0.0.0-20190820155639-cacda0d3194a/go.mod h1:zPTH3Ua2RjW88FkIxVoEowJ9y1/GZxi/lHaE+w8nEHA= +github.com/QiWang19/image v0.0.0-20190820163857-96fd44143c1f h1:N1ja9el2q1B6iXsWeGid7dQD8raYSTaJ2Lm2W3uo0Vg= +github.com/QiWang19/image v0.0.0-20190820163857-96fd44143c1f/go.mod h1:zPTH3Ua2RjW88FkIxVoEowJ9y1/GZxi/lHaE+w8nEHA= +github.com/QiWang19/image v0.0.0-20190821135219-a0a9de846d87 h1:rBI3WRuqszVeKQXyMHKQKrXL+IsWNyYrQooYMaCbNx8= +github.com/QiWang19/image v0.0.0-20190821135219-a0a9de846d87/go.mod h1:zPTH3Ua2RjW88FkIxVoEowJ9y1/GZxi/lHaE+w8nEHA= +github.com/QiWang19/image v0.0.0-20190821182102-53206a8ceffc h1:73gIAoUHKSWq6dcJIiwcMF0nbiyKynoUACGeK0wZku0= +github.com/QiWang19/image v0.0.0-20190821182102-53206a8ceffc/go.mod h1:zPTH3Ua2RjW88FkIxVoEowJ9y1/GZxi/lHaE+w8nEHA= +github.com/QiWang19/image v0.0.0-20190823164615-55d40423d88f h1:khx1FWdwhJhhXT73Ls6z8SDpuxgNo3SAYQstwjeesKU= +github.com/QiWang19/image v0.0.0-20190823164615-55d40423d88f/go.mod 
h1:zPTH3Ua2RjW88FkIxVoEowJ9y1/GZxi/lHaE+w8nEHA= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index f1b029f9740..16c7900c67f 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -21,7 +21,6 @@ import ( "github.com/containers/image/signature" "github.com/containers/image/transports" "github.com/containers/image/types" - "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -43,6 +42,9 @@ type digestingReader struct { // downloads. Let's follow Firefox by limiting it to 6. var maxParallelDownloads = 6 +// compressionBufferSize is the buffer size used to compress a blob +var compressionBufferSize = 1048576 + // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. // (neither is set if EOF is never reached). @@ -86,14 +88,16 @@ func (d *digestingReader) Read(p []byte) (int, error) { // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. 
type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache types.BlobInfoCache - copyInParallel bool + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache types.BlobInfoCache + copyInParallel bool + compressionFormat compression.Algorithm + compressionLevel *int } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -166,6 +170,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, progressOutput = ioutil.Discard } copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() + c := &copier{ dest: dest, rawSource: rawSource, @@ -179,6 +184,20 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // we might want to add a separate CommonCtx — or would that be too confusing? blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx), } + // Default to using gzip compression unless specified otherwise. + if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { + algo, err := compression.AlgorithmByName("gzip") + if err != nil { + return nil, err + } + c.compressionFormat = algo + } else { + c.compressionFormat = *options.DestinationCtx.CompressionFormat + } + if options.DestinationCtx != nil { + // Note that the compressionLevel can be nil. + c.compressionLevel = options.DestinationCtx.CompressionLevel + } unparsedToplevel := image.UnparsedInstance(rawSource, nil) multiImage, err := isMultiImage(ctx, unparsedToplevel) @@ -805,7 +824,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // === Detect compression of the input stream. 
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. - decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform + compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } @@ -819,6 +838,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr originalLayerReader = destStream } + desiredCompressionFormat := c.compressionFormat + // === Deal with layer compression/decompression if necessary var inputInfo types.BlobInfo var compressionOperation types.LayerCompression @@ -831,7 +852,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. - go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter + go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. 
+ logrus.Debugf("Blob will be converted") + + compressionOperation = types.PreserveOriginal + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter + destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 @@ -847,6 +888,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr inputInfo.Digest = "" inputInfo.Size = -1 } else { + // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. logrus.Debugf("Using original blob without modification") compressionOperation = types.PreserveOriginal inputInfo = srcInfo @@ -907,14 +949,19 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func compressGoroutine(dest *io.PipeWriter, src io.Reader) { +func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) { err := errors.New("Internal error: unexpected panic in compressGoroutine") defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() }() - zipper := pgzip.NewWriter(dest) - defer zipper.Close() + compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel) + if err != nil { + return + } + defer compressor.Close() + + buf := make([]byte, compressionBufferSize) - _, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close() + _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. 
causes dest.Close() } diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go index aad2bfcf269..b42151cffc1 100644 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/pkg/compression/compression.go @@ -3,6 +3,7 @@ package compression import ( "bytes" "compress/bzip2" + "fmt" "io" "io/ioutil" @@ -35,32 +36,82 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) { return ioutil.NopCloser(r), nil } -// compressionAlgos is an internal implementation detail of DetectCompression -var compressionAlgos = map[string]struct { +// compressorFunc writes the compressed stream to the given writer using the specified compression level. +// The caller must call Close() on the stream (even if the input stream does not need closing!). +type compressorFunc func(io.Writer, *int) (io.WriteCloser, error) + +// gzipCompressor is a CompressorFunc for the gzip compression algorithm. +func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) { + if level != nil { + return pgzip.NewWriterLevel(r, *level) + } + return pgzip.NewWriter(r), nil +} + +// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm. +func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) { + return nil, fmt.Errorf("bzip2 compression not supported") +} + +// xzCompressor is a CompressorFunc for the xz compression algorithm. +func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) { + return xz.NewWriter(r) +} + +// Algorithm is a compression algorithm that can be used for CompressStream. 
+type Algorithm struct { + name string prefix []byte decompressor DecompressorFunc -}{ - "gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952) - "bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress) - "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt) + compressor compressorFunc } -// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Name returns the name for the compression algorithm. +func (c Algorithm) Name() string { + return c.name +} + +// compressionAlgos is an internal implementation detail of DetectCompression +var compressionAlgos = []Algorithm{ + {"gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor}, // gzip (RFC 1952) + {"bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor}, // bzip2 (decompress.c:BZ2_decompress) + {"xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt) + {"zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor}, // zstd (http://www.zstd.net) +} + +// AlgorithmByName returns the compression algorithm registered under the given name. +func AlgorithmByName(name string) (Algorithm, error) { + for _, c := range compressionAlgos { + if c.name == name { + return c, nil + } + } + return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name) +} + +// CompressStream returns a WriteCloser that compresses data into dest using algo at the given level (nil selects the algorithm's default level). +func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) { + return algo.compressor(dest, level) +} + +// DetectCompressionFormat returns the detected compression Algorithm and its DecompressorFunc if the input is recognized as a compressed format (a zero Algorithm and nil DecompressorFunc otherwise). // Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. 
-func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { +func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) { buffer := [8]byte{} n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. - return nil, nil, err + return Algorithm{}, nil, nil, err } + var retAlgo Algorithm var decompressor DecompressorFunc - for name, algo := range compressionAlgos { + for _, algo := range compressionAlgos { if bytes.HasPrefix(buffer[:n], algo.prefix) { - logrus.Debugf("Detected compression format %s", name) + logrus.Debugf("Detected compression format %s", algo.name) + retAlgo = algo decompressor = algo.decompressor break } @@ -69,7 +120,14 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { logrus.Debugf("No compression detected") } - return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil + return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil +} + +// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. 
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { + _, d, r, e := DetectCompressionFormat(input) + return d, r, e } // AutoDecompress takes a stream and returns an uncompressed version of the diff --git a/vendor/github.com/containers/image/pkg/compression/zstd.go b/vendor/github.com/containers/image/pkg/compression/zstd.go new file mode 100644 index 00000000000..962fe967649 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/compression/zstd.go @@ -0,0 +1,59 @@ +package compression + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} + +func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// zstdCompressor is a CompressorFunc for the zstd compression algorithm. +func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) { + if level == nil { + return zstdWriter(r) + } + return zstdWriterWithLevel(r, *level) +} + +// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. 
+func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { + return zstdReader(r) +} diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go index eef629d5c6e..5ab6ba649e2 100644 --- a/vendor/github.com/containers/image/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/pkg/docker/config/config.go @@ -142,9 +142,17 @@ func RemoveAuthentication(sys *types.SystemContext, registry string) error { }) } -// RemoveAllAuthentication deletes all the credentials stored in auth.json +// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring func RemoveAllAuthentication(sys *types.SystemContext) error { return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + if enableKeyring { + err := removeAllAuthFromKernelKeyring() + if err == nil { + logrus.Debugf("removing all credentials from kernel keyring") + return false, nil + } + logrus.Debugf("error removing credentials from kernel keyring") + } auths.CredHelpers = make(map[string]string) auths.AuthConfigs = make(map[string]dockerAuthConfig) return true, nil diff --git a/vendor/github.com/containers/image/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/pkg/docker/config/config_linux.go index 4d66a50df54..0f476bde505 100644 --- a/vendor/github.com/containers/image/pkg/docker/config/config_linux.go +++ b/vendor/github.com/containers/image/pkg/docker/config/config_linux.go @@ -3,11 +3,16 @@ package config import ( "fmt" "strings" + "unsafe" "github.com/containers/image/pkg/keyctl" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) +const keyDescribePrefix = "container-registry-login:" + func getAuthFromKernelKeyring(registry string) (string, string, error) { userkeyring, err := keyctl.UserKeyring() if err != nil { @@ -41,6 +46,34 @@ func deleteAuthFromKernelKeyring(registry string) error { return key.Unlink() } +func 
removeAllAuthFromKernelKeyring() error { + keyIDs, err := readUserKeyring() + if err != nil { + return err + } + + for _, kID := range keyIDs { + keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(kID)) + if err != nil { + return err + } + // split string "type;uid;gid;perm;description" + keyAttrs := strings.SplitN(keyAttr, ";", 5) + if len(keyAttrs) < 5 { + return errors.Errorf("Key attributes of %d are not available", kID) + } + keyDescribe := keyAttrs[4] + if strings.HasPrefix(keyDescribe, keyDescribePrefix) { + _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(kID), int(unix.KEY_SPEC_USER_KEYRING), 0, 0) + if err != nil { + return errors.Wrapf(err, "error unlinking key %d", kID) + } + logrus.Debugf("unlink key %d:%s", kID, keyAttr) + } + } + return nil +} + func setAuthToKernelKeyring(registry, username, password string) error { keyring, err := keyctl.SessionKeyring() if err != nil { @@ -75,5 +108,46 @@ func setAuthToKernelKeyring(registry, username, password string) error { } func genDescription(registry string) string { - return fmt.Sprintf("container-registry-login:%s", registry) + return fmt.Sprintf("%s%s", keyDescribePrefix, registry) +} + +// readUserKeyring reads the user keyring and returns a slice of key IDs (key_serial_t) identifying all the keys linked to it +func readUserKeyring() ([]int32, error) { + var ( + b []byte + err error + sizeRead int + ) + + krSize := 4 + size := krSize + b = make([]byte, size) + sizeRead = size + 1 + for sizeRead > size { + r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) + if err != nil { + return nil, err + } + + if sizeRead = int(r1); sizeRead > size { + b = make([]byte, sizeRead) + size = sizeRead + sizeRead = size + 1 + } else { + krSize = sizeRead + } + } + + keyIDs := getKeyIDsFromByte(b[:krSize]) + return keyIDs, err +} + +func getKeyIDsFromByte(byteKeyIDs []byte) []int32 { + idSize := 4 + var keyIDs []int32 + for idx := 0; idx+idSize <= len(byteKeyIDs); idx 
= idx + idSize { + tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) + keyIDs = append(keyIDs, tempID) + } + return keyIDs } diff --git a/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go index 1c1a02511cf..9b0e8bee25b 100644 --- a/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go +++ b/vendor/github.com/containers/image/pkg/docker/config/config_unsupported.go @@ -14,3 +14,7 @@ func deleteAuthFromKernelKeyring(registry string) error { func setAuthToKernelKeyring(registry, username, password string) error { return ErrNotSupported } + +func removeAllAuthFromKernelKeyring() error { + return ErrNotSupported +} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 08b4241e043..b94af8dccb3 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -6,6 +6,7 @@ import ( "time" "github.com/containers/image/docker/reference" + "github.com/containers/image/pkg/compression" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -511,6 +512,11 @@ type SystemContext struct { // === dir.Transport overrides === // DirForceCompress compresses the image layers if set to true DirForceCompress bool + + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *compression.Algorithm + // CompressionLevel specifies what compression level is used + CompressionLevel *int } // ProgressProperties is used to pass information from the copy code to a monitor which diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go index f1e795d9bd4..26f290bc057 100644 --- a/vendor/github.com/containers/image/version/version.go +++ b/vendor/github.com/containers/image/version/version.go @@ -8,10 +8,10 @@ 
const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 0 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 3 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/modules.txt b/vendor/modules.txt index 3acff38c913..d08d74b8cee 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -60,7 +60,7 @@ github.com/containers/buildah/docker github.com/containers/buildah/pkg/blobcache github.com/containers/buildah/pkg/overlay github.com/containers/buildah/pkg/unshare -# github.com/containers/image v3.0.2+incompatible +# github.com/containers/image v3.0.2+incompatible => github.com/QiWang19/image v0.0.0-20190823164615-55d40423d88f github.com/containers/image/directory github.com/containers/image/docker github.com/containers/image/docker/archive @@ -89,10 +89,10 @@ github.com/containers/image/version github.com/containers/image/docker/daemon github.com/containers/image/openshift github.com/containers/image/ostree +github.com/containers/image/pkg/compression github.com/containers/image/internal/tmpdir github.com/containers/image/oci/internal github.com/containers/image/pkg/blobinfocache -github.com/containers/image/pkg/compression github.com/containers/image/pkg/blobinfocache/boltdb github.com/containers/image/pkg/blobinfocache/memory github.com/containers/image/pkg/blobinfocache/internal/prioritize