diff --git a/.travis.yml b/.travis.yml index 8d847e258c..2432a053e5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,4 +13,6 @@ addons: apt: packages: + - btrfs-tools + - libdevmapper-dev - libgpgme11-dev diff --git a/Makefile b/Makefile index 0fd329021c..0f05cbd944 100644 --- a/Makefile +++ b/Makefile @@ -5,16 +5,17 @@ SKOPEO_REPO = projectatomic/skopeo SKOPEO_BRANCH = master # Set SUDO=sudo to run container integration tests using sudo. SUDO = +BUILDFLAGS = -tags "btrfs_noversion libdm_no_deferred_remove" all: deps .gitvalidation test validate deps: - go get -t ./... - go get -u github.com/golang/lint/golint - go get github.com/vbatts/git-validation + go get -t $(BUILDFLAGS) ./... + go get -u $(BUILDFLAGS) github.com/golang/lint/golint + go get $(BUILDFLAGS) github.com/vbatts/git-validation test: - @go test -cover ./... + @go test $(BUILDFLAGS) -cover ./... # This is not run as part of (make all), but Travis CI does run this. # Demonstarting a working version of skopeo (possibly with modified SKOPEO_REPO/SKOPEO_BRANCH, e.g. diff --git a/copy/copy.go b/copy/copy.go index e8a9d42d5b..555e378de4 100644 --- a/copy/copy.go +++ b/copy/copy.go @@ -32,6 +32,21 @@ type digestingReader struct { validationFailed bool } +// imageCopier allows us to keep track of diffID values for blobs, and other +// data, that we're copying between images, and cache other information that +// might allow us to take some shortcuts +type imageCopier struct { + copiedBlobs map[digest.Digest]digest.Digest + cachedDiffIDs map[digest.Digest]digest.Digest + manifestUpdates *types.ManifestUpdateOptions + dest types.ImageDestination + src types.Image + rawSource types.ImageSource + diffIDsAreNeeded bool + canModifyManifest bool + reportWriter io.Writer +} + // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // and set validationFailed to true if the source stream does not match expectedDigest. func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { @@ -147,7 +162,20 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe return err } - if err := copyLayers(&manifestUpdates, dest, src, rawSource, canModifyManifest, reportWriter); err != nil { + // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here. + ic := imageCopier{ + copiedBlobs: make(map[digest.Digest]digest.Digest), + cachedDiffIDs: make(map[digest.Digest]digest.Digest), + manifestUpdates: &manifestUpdates, + dest: dest, + src: src, + rawSource: rawSource, + diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates), + canModifyManifest: canModifyManifest, + reportWriter: reportWriter, + } + + if err := ic.copyLayers(); err != nil { return err } @@ -167,7 +195,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe return fmt.Errorf("Error reading manifest: %v", err) } - if err := copyConfig(dest, pendingImage, reportWriter); err != nil { + if err := ic.copyConfig(pendingImage); err != nil { return err } @@ -206,57 +234,41 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe return nil } -// copyLayers copies layers from src/rawSource to dest, using and updating manifestUpdates if necessary and canModifyManifest. -// If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time this function is called. 
-func copyLayers(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, rawSource types.ImageSource, - canModifyManifest bool, reportWriter io.Writer) error { - type copiedLayer struct { - blobInfo types.BlobInfo - diffID digest.Digest - } - - diffIDsAreNeeded := src.UpdatedImageNeedsLayerDiffIDs(*manifestUpdates) - - srcInfos := src.LayerInfos() +// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +func (ic *imageCopier) copyLayers() error { + srcInfos := ic.src.LayerInfos() destInfos := []types.BlobInfo{} diffIDs := []digest.Digest{} - copiedLayers := map[digest.Digest]copiedLayer{} for _, srcLayer := range srcInfos { - cl, ok := copiedLayers[srcLayer.Digest] - if !ok { - var ( - destInfo types.BlobInfo - diffID digest.Digest - err error - ) - if dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { - // DiffIDs are, currently, needed only when converting from schema1. - // In which case src.LayerInfos will not have URLs because schema1 - // does not support them. - if diffIDsAreNeeded { - return errors.New("getting DiffID for foreign layers is unimplemented") - } - destInfo = srcLayer - fmt.Fprintf(reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, dest.Reference().Transport().Name()) - } else { - fmt.Fprintf(reportWriter, "Copying blob %s\n", srcLayer.Digest) - destInfo, diffID, err = copyLayer(dest, rawSource, srcLayer, diffIDsAreNeeded, canModifyManifest, reportWriter) - if err != nil { - return err - } + var ( + destInfo types.BlobInfo + diffID digest.Digest + err error + ) + if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. + if ic.diffIDsAreNeeded { + return errors.New("getting DiffID for foreign layers is unimplemented") + } + destInfo = srcLayer + fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name()) + } else { + destInfo, diffID, err = ic.copyLayer(srcLayer) + if err != nil { + return err } - cl = copiedLayer{blobInfo: destInfo, diffID: diffID} - copiedLayers[srcLayer.Digest] = cl } - destInfos = append(destInfos, cl.blobInfo) - diffIDs = append(diffIDs, cl.diffID) + destInfos = append(destInfos, destInfo) + diffIDs = append(diffIDs, diffID) } - manifestUpdates.InformationOnly.LayerInfos = destInfos - if diffIDsAreNeeded { - manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs } if layerDigestsDiffer(srcInfos, destInfos) { - manifestUpdates.LayerInfos = destInfos + ic.manifestUpdates.LayerInfos = destInfos } return nil } @@ -275,15 +287,15 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { } // copyConfig copies config.json, if any, from src to dest. 
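For orientation, here is a minimal, self-contained sketch of the pattern this refactor introduces (hypothetical names and simplified types, not the code above): per-copy state such as the diffID cache moves onto a struct, and the former free functions become methods that can consult that state across layers.

package main

import "fmt"

// copier stands in for imageCopier: state that used to be threaded through
// every helper as parameters now lives on the receiver.
type copier struct {
	cachedDiffIDs map[string]string
}

// copyLayer consults the cache first, mirroring the shortcut the real
// copyLayer takes; "computed-" stands in for real diffID computation.
func (c *copier) copyLayer(digest string) string {
	if diffID, ok := c.cachedDiffIDs[digest]; ok {
		fmt.Printf("reusing cached diffID for %s\n", digest)
		return diffID
	}
	diffID := "computed-" + digest
	c.cachedDiffIDs[digest] = diffID
	return diffID
}

func main() {
	c := &copier{cachedDiffIDs: map[string]string{}}
	c.copyLayer("sha256:aaa")
	c.copyLayer("sha256:aaa") // second call is served from the cache
}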
-func copyConfig(dest types.ImageDestination, src types.Image, reportWriter io.Writer) error { +func (ic *imageCopier) copyConfig(src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - fmt.Fprintf(reportWriter, "Copying config %s\n", srcInfo.Digest) + fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest) configBlob, err := src.ConfigBlob() if err != nil { return fmt.Errorf("Error reading config blob %s: %v", srcInfo.Digest, err) } - destInfo, err := copyBlobFromStream(dest, bytes.NewReader(configBlob), srcInfo, nil, false, reportWriter) + destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) if err != nil { return err } @@ -303,16 +315,40 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types.BlobInfo, - diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, digest.Digest, error) { - srcStream, srcBlobSize, err := src.GetBlob(srcInfo) +func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { + // Check if we already have a blob with this digest + haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo) + if err != nil && err != types.ErrBlobNotFound { + return types.BlobInfo{}, "", fmt.Errorf("Error checking for blob %s at destination: %v", srcInfo.Digest, err) + } + // If we already have a cached diffID for this blob, we don't need to compute it + diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "") + // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again + if haveBlob && !diffIDIsNeeded { + // Check the blob sizes match, if we were given a size this time + if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize { + return types.BlobInfo{}, "", fmt.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size) + } + srcInfo.Size = extantBlobSize + // Tell the image destination that this blob's delta is being applied again. 
+		// For some image destinations, this can be faster than using GetBlob/PutBlob
+		blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
+		if err != nil {
+			return types.BlobInfo{}, "", fmt.Errorf("Error reapplying blob %s at destination: %v", srcInfo.Digest, err)
+		}
+		fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+		return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
+	}
+
+	// Fallback: copy the layer, computing the diffID if we need to do so
+	fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
+	srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
 	if err != nil {
 		return types.BlobInfo{}, "", fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
 	}
 	defer srcStream.Close()
 
-	blobInfo, diffIDChan, err := copyLayerFromStream(dest, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
-		diffIDIsNeeded, canCompress, reportWriter)
+	blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
+		diffIDIsNeeded)
 	if err != nil {
 		return types.BlobInfo{}, "", err
 	}
@@ -323,6 +359,7 @@ func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types
 			return types.BlobInfo{}, "", fmt.Errorf("Error computing layer DiffID: %v", diffIDResult.err)
 		}
 		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+		ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
 	}
 	return blobInfo, diffIDResult.digest, nil
 }
@@ -331,8 +368,8 @@ func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types
 // it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
 // perhaps compressing the stream if canCompress,
 // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
-func copyLayerFromStream(dest types.ImageDestination, srcStream io.Reader, srcInfo types.BlobInfo,
-	diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, <-chan diffIDResult, error) {
+func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
+	diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
 	var getDiffIDRecorder func(decompressorFunc) io.Writer // = nil
 	var diffIDChan chan diffIDResult
 
@@ -356,8 +393,7 @@ func copyLayerFromStream(dest types.ImageDestination, srcStream io.Reader, srcIn
 			return pipeWriter
 		}
 	}
-	blobInfo, err := copyBlobFromStream(dest, srcStream, srcInfo,
-		getDiffIDRecorder, canCompress, reportWriter) // Sets err to nil on success
+	blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
 	return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
 }
 
@@ -391,9 +427,9 @@ func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Dige
 // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
 // perhaps compressing it if canCompress,
 // and returns a complete blobInfo of the copied blob.
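copyBlobFromStream below assembles the copy as a chain of wrapped io.Readers; a standalone sketch of that layering, using only the standard library (all names here are illustrative, not the function's actual locals):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// writerFunc adapts a plain function to io.Writer.
type writerFunc func(p []byte) (int, error)

func (f writerFunc) Write(p []byte) (int, error) { return f(p) }

func main() {
	src := strings.NewReader("blob bytes")
	hasher := sha256.New()
	// Stage 1: digest everything that flows past, without buffering it.
	digested := io.TeeReader(src, hasher)
	// Stage 2: count bytes for a size check (a progress bar fits the same slot).
	var count int64
	counted := io.TeeReader(digested, writerFunc(func(p []byte) (int, error) {
		count += int64(len(p))
		return len(p), nil
	}))
	// The final consumer (dest.PutBlob in the real code) drives every stage.
	if _, err := io.Copy(ioutil.Discard, counted); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, sha256:%s\n", count, hex.EncodeToString(hasher.Sum(nil)))
}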
-func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor decompressorFunc) io.Writer, canCompress bool, - reportWriter io.Writer) (types.BlobInfo, error) { +func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor decompressorFunc) io.Writer, + canCompress bool) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. // === Input: srcStream @@ -419,13 +455,13 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf // === Report progress using a pb.Reader. bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = reportWriter + bar.Output = ic.reportWriter bar.SetMaxWidth(80) bar.ShowTimeLeft = false bar.ShowPercent = false bar.Start() destStream = bar.NewProxyReader(destStream) - defer fmt.Fprint(reportWriter, "\n") + defer fmt.Fprint(ic.reportWriter, "\n") // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. @@ -436,7 +472,7 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf // === Compress the layer if it is uncompressed and compression is desired var inputInfo types.BlobInfo - if !canCompress || isCompressed || !dest.ShouldCompressLayers() { + if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() { logrus.Debugf("Using original blob without modification") inputInfo = srcInfo } else { @@ -454,7 +490,7 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf } // === Finally, send the layer stream to dest. 
-	uploadedInfo, err := dest.PutBlob(destStream, inputInfo)
+	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
 	if err != nil {
 		return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err)
 	}
diff --git a/directory/directory_dest.go b/directory/directory_dest.go
index 6d76d30ecb..2a8d102e55 100644
--- a/directory/directory_dest.go
+++ b/directory/directory_dest.go
@@ -94,6 +94,25 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }
 
+func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, -1, types.ErrBlobNotFound
+	}
+	if err != nil {
+		return false, -1, err
+	}
+	return true, finfo.Size(), nil
+}
+
+func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func (d *dirImageDestination) PutManifest(manifest []byte) error {
 	return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
 }
diff --git a/docker/daemon/daemon_dest.go b/docker/daemon/daemon_dest.go
index 70db947fa5..7c4576ddb0 100644
--- a/docker/daemon/daemon_dest.go
+++ b/docker/daemon/daemon_dest.go
@@ -29,7 +29,8 @@ type daemonImageDestination struct {
 	writer *io.PipeWriter
 	tar    *tar.Writer
 	// Other state
-	committed bool // writer has been closed
+	committed bool                             // writer has been closed
+	blobs     map[digest.Digest]types.BlobInfo // list of already-sent blobs
 }
 
 // newImageDestination returns a types.ImageDestination for the specified image reference.
@@ -62,6 +63,7 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
 		writer:    writer,
 		tar:       tar.NewWriter(writer),
 		committed: false,
+		blobs:     make(map[digest.Digest]types.BlobInfo),
 	}, nil
 }
 
@@ -142,8 +144,8 @@ func (d *daemonImageDestination) AcceptsForeignLayerURLs() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-	if inputInfo.Digest.String() == "" {
-		return types.BlobInfo{}, fmt.Errorf(`"Can not stream a blob with unknown digest to "docker-daemon:"`)
+	if ok, size, err := d.HasBlob(inputInfo); err == nil && ok {
+		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
 	}
 	if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
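The two methods added to each destination above and below follow one contract; a hedged sketch (cut-down interface, hypothetical helper name) of how a caller is expected to combine them:

package sketch

import "errors"

// ErrNotFound stands in for types.ErrBlobNotFound in this sketch.
var ErrNotFound = errors.New("blob not found")

// Destination is a simplified view of the HasBlob/ReapplyBlob additions.
type Destination interface {
	HasBlob(digest string) (present bool, size int64, err error)
	ReapplyBlob(digest string) error
	PutBlob(digest string, data []byte) error
}

// PutIfMissing probes first and only uploads when the blob is absent.
func PutIfMissing(d Destination, digest string, data []byte) error {
	present, size, err := d.HasBlob(digest)
	if err != nil && err != ErrNotFound {
		return err
	}
	if present && size == int64(len(data)) {
		return d.ReapplyBlob(digest) // usually a cheap no-op, as in the diff
	}
	return d.PutBlob(digest, data)
}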
@@ -173,9 +175,24 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
 		return types.BlobInfo{}, err
 	}
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
 	return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
 }
 
+func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := d.blobs[info.Digest]; ok {
+		return true, blob.Size, nil
+	}
+	return false, -1, types.ErrBlobNotFound
+}
+
+func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func (d *daemonImageDestination) PutManifest(m []byte) error {
 	var man schema2Manifest
 	if err := json.Unmarshal(m, &man); err != nil {
diff --git a/docker/docker_client.go b/docker/docker_client.go
index 30ada72875..35ecb96eac 100644
--- a/docker/docker_client.go
+++ b/docker/docker_client.go
@@ -17,7 +17,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/types"
-	"github.com/docker/docker/pkg/homedir"
+	"github.com/containers/storage/pkg/homedir"
 	"github.com/docker/go-connections/sockets"
 	"github.com/docker/go-connections/tlsconfig"
 )
diff --git a/docker/docker_client_test.go b/docker/docker_client_test.go
index dc1425df8b..937603572c 100644
--- a/docker/docker_client_test.go
+++ b/docker/docker_client_test.go
@@ -11,7 +11,7 @@ import (
 	"testing"
 
 	"github.com/containers/image/types"
-	"github.com/docker/docker/pkg/homedir"
+	"github.com/containers/storage/pkg/homedir"
 )
 
 func TestGetAuth(t *testing.T) {
diff --git a/docker/docker_image_dest.go b/docker/docker_image_dest.go
index e34c7237ee..54df01d716 100644
--- a/docker/docker_image_dest.go
+++ b/docker/docker_image_dest.go
@@ -163,6 +163,39 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
 }
 
+func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), info.Digest.String())
+
+	logrus.Debugf("Checking %s", checkURL)
+	res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
+	if err != nil {
+		return false, -1, err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusOK:
+		logrus.Debugf("... already exists")
+		return true, getBlobSize(res), nil
+	case http.StatusUnauthorized:
+		logrus.Debugf("... not authorized")
+		return false, -1, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
+	case http.StatusNotFound:
+		logrus.Debugf("... not present")
+		return false, -1, types.ErrBlobNotFound
+	default:
+		logrus.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode))
+	}
+	logrus.Debugf("... failed, status %d, ignoring", res.StatusCode)
+	return false, -1, types.ErrBlobNotFound
+}
+
+func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func (d *dockerImageDestination) PutManifest(m []byte) error {
 	digest, err := manifest.Digest(m)
 	if err != nil {
diff --git a/image/docker_schema2_test.go b/image/docker_schema2_test.go
index 4204146e67..73249d0088 100644
--- a/image/docker_schema2_test.go
+++ b/image/docker_schema2_test.go
@@ -374,6 +374,12 @@ func (d *memoryImageDest) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (t
 	d.storedBlobs[inputInfo.Digest] = contents
 	return types.BlobInfo{Digest: inputInfo.Digest, Size: int64(len(contents))}, nil
 }
+func (d *memoryImageDest) HasBlob(inputInfo types.BlobInfo) (bool, int64, error) {
+	panic("Unexpected call to a mock function")
+}
+func (d *memoryImageDest) ReapplyBlob(inputInfo types.BlobInfo) (types.BlobInfo, error) {
+	panic("Unexpected call to a mock function")
+}
 func (d *memoryImageDest) PutManifest([]byte) error {
 	panic("Unexpected call to a mock function")
 }
diff --git a/image/memory.go b/image/memory.go
index 568f855acb..9404db826e 100644
--- a/image/memory.go
+++ b/image/memory.go
@@ -35,6 +35,15 @@ func (i *memoryImage) Reference() types.ImageReference {
 func (i *memoryImage) Close() {
 }
 
+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+	s, err := i.serialize()
+	if err != nil {
+		return -1, err
+	}
+	return int64(len(s)), nil
+}
+
 // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
 func (i *memoryImage) Manifest() ([]byte, string, error) {
 	if i.serializedManifest == nil {
diff --git a/image/sourced.go b/image/sourced.go
index a7c25ab13c..ef35b3c32a 100644
--- a/image/sourced.go
+++ b/image/sourced.go
@@ -71,6 +71,11 @@ func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
 	}, nil
 }
 
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *sourcedImage) Size() (int64, error) {
+	return -1, nil
+}
+
 // Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
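The dockerImageDestination.HasBlob above amounts to an HTTP HEAD against the registry's blobs endpoint; a standalone sketch with net/http (unauthenticated, placeholder URL scheme — the real code goes through the transport's authenticated client):

package sketch

import (
	"fmt"
	"net/http"
)

// blobExists issues HEAD /v2/<repo>/blobs/<digest>; 200 means present (with
// Content-Length carrying the size), 404 means absent.
func blobExists(registry, repo, digest string) (bool, int64, error) {
	url := fmt.Sprintf("%s/v2/%s/blobs/%s", registry, repo, digest)
	res, err := http.Head(url)
	if err != nil {
		return false, -1, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case http.StatusOK:
		return true, res.ContentLength, nil
	case http.StatusNotFound:
		return false, -1, nil
	default:
		return false, -1, fmt.Errorf("unexpected status %s", res.Status)
	}
}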
 func (i *sourcedImage) Manifest() ([]byte, string, error) {
 	return i.manifestBlob, i.manifestMIMEType, nil
 }
diff --git a/oci/layout/oci_dest.go b/oci/layout/oci_dest.go
index 55581c7e41..1c849e0d30 100644
--- a/oci/layout/oci_dest.go
+++ b/oci/layout/oci_dest.go
@@ -112,6 +112,28 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }
 
+func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath, err := d.ref.blobPath(info.Digest)
+	if err != nil {
+		return false, -1, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, -1, types.ErrBlobNotFound
+	}
+	if err != nil {
+		return false, -1, err
+	}
+	return true, finfo.Size(), nil
+}
+
+func (d *ociImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
 func createManifest(m []byte) ([]byte, string, error) {
 	om := imgspecv1.Manifest{}
 	mt := manifest.GuessMIMEType(m)
diff --git a/openshift/openshift.go b/openshift/openshift.go
index 65b80f92c4..a8f666818a 100644
--- a/openshift/openshift.go
+++ b/openshift/openshift.go
@@ -366,6 +366,14 @@ func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.Bl
 	return d.docker.PutBlob(stream, inputInfo)
 }
 
+func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	return d.docker.HasBlob(info)
+}
+
+func (d *openshiftImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return d.docker.ReapplyBlob(info)
+}
+
 func (d *openshiftImageDestination) PutManifest(m []byte) error {
 	manifestDigest, err := manifest.Digest(m)
 	if err != nil {
diff --git a/storage/storage_image.go b/storage/storage_image.go
new file mode 100644
index 0000000000..132c5ad5d0
--- /dev/null
+++ b/storage/storage_image.go
@@ -0,0 +1,570 @@
+package storage
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/image/image"
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/storage"
+	ddigest "github.com/docker/distribution/digest"
+)
+
+var (
+	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
+	// with a digest-based name that doesn't match its contents.
+	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+	// with an expected size that doesn't match the reader.
+	ErrBlobSizeMismatch = errors.New("blob size mismatch")
+	// ErrNoManifestLists is returned when GetTargetManifest() is
+	// called.
+	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
+	// ErrNoSuchImage is returned when we attempt to access an image which
+	// doesn't exist in the storage area.
+ ErrNoSuchImage = storage.ErrNotAnImage +) + +type storageImageSource struct { + imageRef storageReference + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice +} + +type storageImageDestination struct { + imageRef storageReference + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary + Manifest []byte `json:"-"` // Manifest contents, temporary + Signatures []byte `json:"-"` // Signature contents, temporary + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice +} + +type storageLayerMetadata struct { + Digest string `json:"digest,omitempty"` + Size int64 `json:"size"` + CompressedSize int64 `json:"compressed-size,omitempty"` +} + +type storageImage struct { + types.Image + size int64 +} + +// newImageSource sets us up to read out an image, which needs to already exist. +func newImageSource(imageRef storageReference) (*storageImageSource, error) { + id := imageRef.resolveID() + if id == "" { + logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport()) + return nil, ErrNoSuchImage + } + img, err := imageRef.transport.store.GetImage(id) + if err != nil { + return nil, fmt.Errorf("error reading image %q: %v", id, err) + } + image := &storageImageSource{ + imageRef: imageRef, + Created: time.Now(), + ID: img.ID, + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + LayerPosition: make(map[ddigest.Digest]int), + SignatureSizes: []int{}, + } + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, fmt.Errorf("error decoding metadata for source image: %v", err) + } + return image, nil +} + +// newImageDestination sets us up to write a new image. +func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { + image := &storageImageDestination{ + imageRef: imageRef, + Tag: imageRef.reference, + Created: time.Now(), + ID: imageRef.id, + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + BlobData: make(map[ddigest.Digest][]byte), + SignatureSizes: []int{}, + } + return image, nil +} + +func (s storageImageSource) Reference() types.ImageReference { + return s.imageRef +} + +func (s storageImageDestination) Reference() types.ImageReference { + return s.imageRef +} + +func (s storageImageSource) Close() { +} + +func (s storageImageDestination) Close() { +} + +func (s storageImageDestination) ShouldCompressLayers() bool { + // We ultimately have to decompress layers to populate trees on disk, + // so callers shouldn't bother compressing them before handing them to + // us, if they're not already compressed. 
+	return false
+}
+
+// PutBlob is used to both store filesystem layers and binary data that is part
+// of the image. Filesystem layers are assumed to be imported in order, as
+// that is required by some of the underlying storage drivers.
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
+	blobSize := int64(-1)
+	digest := blobinfo.Digest
+	errorBlobInfo := types.BlobInfo{
+		Digest: "",
+		Size:   -1,
+	}
+	// Try to read an initial snippet of the blob.
+	header := make([]byte, 10240)
+	n, err := stream.Read(header)
+	if err != nil && err != io.EOF {
+		return errorBlobInfo, err
+	}
+	// Set up to read the whole blob (the initial snippet, plus the rest)
+	// while digesting it with either the default, or the passed-in digest,
+	// if one was specified.
+	hasher := ddigest.Canonical.New()
+	if digest.Validate() == nil {
+		if a := digest.Algorithm(); a.Available() {
+			hasher = a.New()
+		}
+	}
+	hash := ""
+	counter := ioutils.NewWriteCounter(hasher.Hash())
+	defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream)
+	multi := io.TeeReader(defragmented, counter)
+	if (n > 0) && archive.IsArchive(header[:n]) {
+		// It's a filesystem layer. If it's not the first one in the
+		// image, we assume that the most recently added layer is its
+		// parent.
+		parentLayer := ""
+		for _, blob := range s.BlobList {
+			if layerList, ok := s.Layers[blob.Digest]; ok {
+				parentLayer = layerList[len(layerList)-1]
+			}
+		}
+		// If we have an expected content digest, generate a layer ID
+		// based on the parent's ID and the expected content digest.
+		id := ""
+		if digest.Validate() == nil {
+			id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
+		}
+		// Attempt to create the identified layer and import its contents.
+		layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
+		if err != nil && err != storage.ErrDuplicateID {
+			logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
+			return errorBlobInfo, err
+		}
+		if err == storage.ErrDuplicateID {
+			// We specified an ID, and there's already a layer with
+			// the same ID. Drain the input so that we can look at
+			// its length and digest.
+			_, err := io.Copy(ioutil.Discard, multi)
+			if err != nil && err != io.EOF {
+				logrus.Debugf("error digesting layer blob %q as %q: %v", blobinfo.Digest, id, err)
+				return errorBlobInfo, err
+			}
+			hash = hasher.Digest().String()
+		} else {
+			// Applied the layer with the specified ID. Note the
+			// size info and computed digest.
+			hash = hasher.Digest().String()
+			layerMeta := storageLayerMetadata{
+				Digest:         hash,
+				CompressedSize: counter.Count,
+				Size:           uncompressedSize,
+			}
+			if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil {
+				s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
+			}
+			// Hang on to the new layer's ID.
+			id = layer.ID
+		}
+		blobSize = counter.Count
+		// Check if the size looks right.
+		if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
+			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
+			if layer != nil {
+				// Something's wrong; delete the newly-created layer.
+				s.imageRef.transport.store.DeleteLayer(layer.ID)
+			}
+			return errorBlobInfo, ErrBlobSizeMismatch
+		}
+		// If the content digest was specified, verify it.
+		if digest.Validate() == nil && digest.String() != hash {
+			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
+			if layer != nil {
+				// Something's wrong; delete the newly-created layer.
+				s.imageRef.transport.store.DeleteLayer(layer.ID)
+			}
+			return errorBlobInfo, ErrBlobDigestMismatch
+		}
+		// If we didn't get a digest, construct one.
+		if digest == "" {
+			digest = ddigest.Digest(hash)
+		}
+		// Record that this layer blob is a layer, and the layer ID it
+		// ended up having. This is a list, in case the same blob is
+		// being applied more than once.
+		s.Layers[digest] = append(s.Layers[digest], id)
+		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
+		if layer != nil {
+			logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
+		} else {
+			logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id)
+		}
+	} else {
+		// It's just data. Finish scanning it in, check that our
+		// computed digest matches the passed-in digest, and store it,
+		// but leave it out of the blob-to-layer-ID map so that we can
+		// tell that it's not a layer.
+		blob, err := ioutil.ReadAll(multi)
+		if err != nil && err != io.EOF {
+			return errorBlobInfo, err
+		}
+		blobSize = int64(len(blob))
+		hash = hasher.Digest().String()
+		if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
+			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
+			return errorBlobInfo, ErrBlobSizeMismatch
+		}
+		// If we were given a digest, verify that the content matches
+		// it.
+		if digest.Validate() == nil && digest.String() != hash {
+			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
+			return errorBlobInfo, ErrBlobDigestMismatch
+		}
+		// If we didn't get a digest, construct one.
+		if digest == "" {
+			digest = ddigest.Digest(hash)
+		}
+		// Save the blob for when we Commit().
+		s.BlobData[digest] = blob
+		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
+		logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
+	}
+	return types.BlobInfo{
+		Digest: digest,
+		Size:   blobSize,
+	}, nil
+}
+
+func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+	if blobinfo.Digest == "" {
+		return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+	}
+	for _, blob := range s.BlobList {
+		if blob.Digest == blobinfo.Digest {
+			return true, blob.Size, nil
+		}
+	}
+	return false, -1, types.ErrBlobNotFound
+}
+
+func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
+	err := blobinfo.Digest.Validate()
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 {
+		b, err := s.imageRef.transport.store.GetImageBigData(s.ID, blobinfo.Digest.String())
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil
+	}
+	layerList := s.Layers[blobinfo.Digest]
+	rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1])
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	return s.PutBlob(rc, blobinfo)
+}
+
+func (s *storageImageDestination) Commit() error {
+	// Create the image record.
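// Both the parent selection in PutBlob above and the top-layer selection in
// Commit below rely on the same scan; a compact sketch of it (simplified
// string-keyed types, not the method's actual locals):

package sketch

// topLayer mirrors that scan: blobs arrive in order, so the last blob that
// mapped to a layer yields the image's top (most recently added) layer ID.
func topLayer(blobOrder []string, layers map[string][]string) string {
	last := ""
	for _, digest := range blobOrder {
		if ids := layers[digest]; len(ids) > 0 {
			last = ids[len(ids)-1]
		}
	}
	return last
}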
+ lastLayer := "" + for _, blob := range s.BlobList { + if layerList, ok := s.Layers[blob.Digest]; ok { + lastLayer = layerList[len(layerList)-1] + } + } + img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) + if err != nil { + logrus.Debugf("error creating image: %q", err) + return err + } + logrus.Debugf("created new image ID %q", img.ID) + s.ID = img.ID + if s.Tag != "" { + // We have a name to set, so move the name to this image. + if err := s.imageRef.transport.store.SetNames(img.ID, []string{s.Tag}); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error setting names on image %q: %v", img.ID, err) + return err + } + logrus.Debugf("set name of image %q to %q", img.ID, s.Tag) + } + // Save the data blobs to disk, and drop their contents from memory. + keys := []ddigest.Digest{} + for k, v := range s.BlobData { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) + return err + } + keys = append(keys, k) + } + for _, key := range keys { + delete(s.BlobData, key) + } + // Save the manifest, if we have one. + if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return err + } + // Save the signatures, if we have any. + if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err + } + // Save our metadata. + metadata, err := json.Marshal(s) + if err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) + return err + } + if len(metadata) != 0 { + if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return err + } + logrus.Debugf("saved image metadata %q", string(metadata)) + } + return nil +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return nil +} + +func (s *storageImageDestination) PutManifest(manifest []byte) error { + s.Manifest = make([]byte, len(manifest)) + copy(s.Manifest, manifest) + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to +// return data that was previously supplied to PutSignatures(). 
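Commit above repeats the same delete-on-failure cleanup after each store write; condensed into a sketch (hypothetical cut-down interface), the pattern is:

package sketch

// store is a simplified view of the storage.Store calls Commit makes.
type store interface {
	SetImageBigData(id, key string, data []byte) error
	DeleteImage(id string, commit bool) ([]string, error)
}

// writeOrRollback tears down the half-built image if a write fails, so a
// partial image never survives; the original write error is returned.
func writeOrRollback(s store, id, key string, data []byte) error {
	if err := s.SetImageBigData(id, key, data); err != nil {
		s.DeleteImage(id, true) // best-effort cleanup; the real code logs failures
		return err
	}
	return nil
}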
+func (s *storageImageDestination) SupportsSignatures() error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + s.Signatures = sigblob + s.SignatureSizes = sizes + return nil +} + +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info) + return rc, n, err +} + +func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { + b, err := s.imageRef.transport.store.GetImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // If the blob was "put" more than once, we have multiple layer IDs + // which should all produce the same diff. For the sake of tests that + // want to make sure we created different layers each time the blob was + // "put", though, cycle through the layers. + layerList := s.Layers[info.Digest] + position, ok := s.LayerPosition[info.Digest] + if !ok { + position = 0 + } + s.LayerPosition[info.Digest] = (position + 1) % len(layerList) + logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest) + rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position]) + return rc, n, layerList[position], err +} + +func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) { + layer, err := store.GetLayer(layerID) + if err != nil { + return nil, -1, err + } + layerMeta := storageLayerMetadata{ + CompressedSize: -1, + } + if layer.Metadata != "" { + if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { + return nil, -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err) + } + } + if layerMeta.CompressedSize <= 0 { + n = -1 + } else { + n = layerMeta.CompressedSize + } + diff, err := store.Diff("", layer.ID) + if err != nil { + return nil, -1, err + } + return diff, n, nil +} + +func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { + manifestBlob, err = s.imageRef.transport.store.GetImageBigData(s.ID, "manifest") + return manifestBlob, manifest.GuessMIMEType(manifestBlob), err +} + +func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) { + return nil, "", ErrNoManifestLists +} + +func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) { + var offset int + signature, err := s.imageRef.transport.store.GetImageBigData(s.ID, "signatures") + if err != nil { + return nil, err + } + sigslice := [][]byte{} + for _, length := range s.SignatureSizes { + sigslice = append(sigslice, signature[offset:offset+length]) + offset += length + } + if offset != 
len(signature) {
+		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+func (s *storageImageSource) getSize() (int64, error) {
+	var sum int64
+	names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
+	if err != nil {
+		return -1, fmt.Errorf("error reading image %q: %v", s.imageRef.id, err)
+	}
+	for _, name := range names {
+		bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name)
+		if err != nil {
+			return -1, fmt.Errorf("error reading data blob size %q for %q: %v", name, s.imageRef.id, err)
+		}
+		sum += bigSize
+	}
+	for _, sigSize := range s.SignatureSizes {
+		sum += int64(sigSize)
+	}
+	for _, layerList := range s.Layers {
+		for _, layerID := range layerList {
+			layer, err := s.imageRef.transport.store.GetLayer(layerID)
+			if err != nil {
+				return -1, err
+			}
+			layerMeta := storageLayerMetadata{
+				Size: -1,
+			}
+			if layer.Metadata != "" {
+				if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+					return -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
+				}
+			}
+			if layerMeta.Size < 0 {
+				return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+			}
+			sum += layerMeta.Size
+		}
+	}
+	return sum, nil
+}
+
+func (s *storageImage) Size() (int64, error) {
+	return s.size, nil
+}
+
+// newImage creates an image that also knows its size
+func newImage(s storageReference) (types.Image, error) {
+	src, err := newImageSource(s)
+	if err != nil {
+		return nil, err
+	}
+	img, err := image.FromSource(src)
+	if err != nil {
+		return nil, err
+	}
+	size, err := src.getSize()
+	if err != nil {
+		return nil, err
+	}
+	return &storageImage{Image: img, size: size}, nil
+}
diff --git a/storage/storage_reference.go b/storage/storage_reference.go
new file mode 100644
index 0000000000..13413df1bc
--- /dev/null
+++ b/storage/storage_reference.go
@@ -0,0 +1,127 @@
+package storage
+
+import (
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/types"
+)
+
+// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
+// value hex-encoded into a 64-character string, and a reference to a Store
+// where an image is, or would be, kept.
+type storageReference struct {
+	transport storageTransport
+	reference string
+	id        string
+	name      reference.Named
+}
+
+func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference {
+	// We take a copy of the transport, which contains a pointer to the
+	// store that it used for resolving this reference, so that the
+	// transport that we'll return from Transport() won't be affected by
+	// further calls to the original transport's SetStore() method.
+	return &storageReference{
+		transport: transport,
+		reference: reference,
+		id:        id,
+		name:      name,
+	}
+}
+
+// Resolve the reference's name to an image ID in the store, if there's already
+// one present with the same name or ID.
+func (s *storageReference) resolveID() string {
+	if s.id == "" {
+		image, err := s.transport.store.GetImage(s.reference)
+		if image != nil && err == nil {
+			s.id = image.ID
+		}
+	}
+	return s.id
+}
+
+// Return a Transport object that defaults to using the same store that we used
+// to build this reference object.
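For reference, the string forms the methods below produce and parse, shown with illustrative driver/root values (a runnable sketch, not output from the real store):

package main

import "fmt"

func main() {
	// "[driver@graph-root]" scopes a name or ID to one particular store.
	storeSpec := "[vfs@/var/lib/containers/storage]"
	name := "docker.io/library/busybox:latest"
	id := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	fmt.Println(storeSpec + "@" + id)        // ID only
	fmt.Println(storeSpec + name)            // name only
	fmt.Println(storeSpec + name + "@" + id) // name and ID
}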
+func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + } +} + +// Return a name with a tag, if we have a name to base them on. +func (s storageReference) DockerReference() reference.Named { + return s.name +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. +func (s storageReference) StringWithinTransport() string { + storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]" + if s.name == nil { + return storeSpec + "@" + s.id + } + if s.id == "" { + return storeSpec + s.reference + } + return storeSpec + s.reference + "@" + s.id +} + +func (s storageReference) PolicyConfigurationIdentity() string { + return s.StringWithinTransport() +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. +func (s storageReference) PolicyConfigurationNamespaces() []string { + storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]" + driverlessStoreSpec := "[" + s.transport.store.GetGraphRoot() + "]" + namespaces := []string{} + if s.name != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. + namespaces = append(namespaces, storeSpec+s.reference) + } + components := strings.Split(s.name.FullName(), "/") + for len(components) > 0 { + namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) + components = components[:len(components)-1] + } + } + namespaces = append(namespaces, storeSpec) + namespaces = append(namespaces, driverlessStoreSpec) + return namespaces +} + +func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) { + return newImage(s) +} + +func (s storageReference) DeleteImage(ctx *types.SystemContext) error { + id := s.resolveID() + if id == "" { + logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return ErrNoSuchImage + } + layers, err := s.transport.store.DeleteImage(id, true) + if err == nil { + logrus.Debugf("deleted image %q", id) + for _, layer := range layers { + logrus.Debugf("deleted layer %q", layer) + } + } + return err +} + +func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { + return newImageSource(s) +} + +func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(s) +} diff --git a/storage/storage_reference_test.go b/storage/storage_reference_test.go new file mode 100644 index 0000000000..687d1005dc --- /dev/null +++ b/storage/storage_reference_test.go @@ -0,0 +1,97 @@ +package storage + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStorageReferenceTransport(t *testing.T) { + newStore(t) + ref, err := Transport.ParseReference("busybox") + require.NoError(t, err) + transport := ref.Transport() + st, ok := transport.(*storageTransport) + require.True(t, ok) + assert.Equal(t, *(Transport.(*storageTransport)), *st) +} + +func TestStorageReferenceDockerReference(t *testing.T) { + ref, err := Transport.ParseReference("busybox") + require.NoError(t, err) + 
dr := ref.DockerReference() + require.NotNil(t, dr) + assert.Equal(t, "busybox:latest", dr.String()) + + ref, err = Transport.ParseReference("@" + sha256digestHex) + require.NoError(t, err) + + dr = ref.DockerReference() + assert.Nil(t, dr) +} + +// A common list of reference formats to test for the various ImageReference methods. +var validReferenceTestCases = []struct { + input, canonical string + namespaces []string +}{ + { + "busybox", "docker.io/library/busybox:latest", + []string{"docker.io/library/busybox", "docker.io/library", "docker.io"}, + }, + { + "example.com/myns/ns2/busybox:notlatest", "example.com/myns/ns2/busybox:notlatest", + []string{"example.com/myns/ns2/busybox", "example.com/myns/ns2", "example.com/myns", "example.com"}, + }, + { + "@" + sha256digestHex, "@" + sha256digestHex, + []string{}, + }, + { + "busybox@" + sha256digestHex, "docker.io/library/busybox:latest@" + sha256digestHex, + []string{"docker.io/library/busybox:latest", "docker.io/library/busybox", "docker.io/library", "docker.io"}, + }, +} + +func TestStorageReferenceStringWithinTransport(t *testing.T) { + store := newStore(t) + storeSpec := fmt.Sprintf("[%s@%s]", store.GetGraphDriverName(), store.GetGraphRoot()) + + for _, c := range validReferenceTestCases { + ref, err := Transport.ParseReference(c.input) + require.NoError(t, err, c.input) + assert.Equal(t, storeSpec+c.canonical, ref.StringWithinTransport(), c.input) + } +} + +func TestStorageReferencePolicyConfigurationIdentity(t *testing.T) { + store := newStore(t) + storeSpec := fmt.Sprintf("[%s@%s]", store.GetGraphDriverName(), store.GetGraphRoot()) + + for _, c := range validReferenceTestCases { + ref, err := Transport.ParseReference(c.input) + require.NoError(t, err, c.input) + assert.Equal(t, storeSpec+c.canonical, ref.PolicyConfigurationIdentity(), c.input) + } +} + +func TestStorageReferencePolicyConfigurationNamespaces(t *testing.T) { + store := newStore(t) + storeSpec := fmt.Sprintf("[%s@%s]", store.GetGraphDriverName(), store.GetGraphRoot()) + + for _, c := range validReferenceTestCases { + ref, err := Transport.ParseReference(c.input) + require.NoError(t, err, c.input) + expectedNS := []string{} + for _, ns := range c.namespaces { + expectedNS = append(expectedNS, storeSpec+ns) + } + expectedNS = append(expectedNS, storeSpec) + expectedNS = append(expectedNS, fmt.Sprintf("[%s]", store.GetGraphRoot())) + assert.Equal(t, expectedNS, ref.PolicyConfigurationNamespaces()) + } +} + +// NewImage, NewImageSource, NewImageDestination, DeleteImage tested in storage_test.go diff --git a/storage/storage_test.go b/storage/storage_test.go new file mode 100644 index 0000000000..fc4f9e6322 --- /dev/null +++ b/storage/storage_test.go @@ -0,0 +1,882 @@ +package storage + +import ( + "archive/tar" + "bytes" + "crypto/rand" + "crypto/sha256" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/storage" + ddigest "github.com/docker/distribution/digest" +) + +var ( + _imgd types.ImageDestination = &storageImageDestination{} + _imgs types.ImageSource = &storageImageSource{} + _ref types.ImageReference = &storageReference{} + _transport types.ImageTransport = &storageTransport{} + topwd = "" +) + +const ( + layerSize = 12345 +) + +func 
TestMain(m *testing.M) { + if reexec.Init() { + return + } + wd, err := ioutil.TempDir("", "test.") + if err != nil { + os.Exit(1) + } + topwd = wd + debug := false + flag.BoolVar(&debug, "debug", false, "print debug statements") + flag.Parse() + if debug { + logrus.SetLevel(logrus.DebugLevel) + } + code := m.Run() + os.RemoveAll(wd) + os.Exit(code) +} + +func newStore(t *testing.T) storage.Store { + wd, err := ioutil.TempDir(topwd, "test.") + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(wd, 0700) + if err != nil { + t.Fatal(err) + } + run := filepath.Join(wd, "run") + root := filepath.Join(wd, "root") + uidmap := []idtools.IDMap{{ + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }} + gidmap := []idtools.IDMap{{ + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }} + store, err := storage.GetStore(storage.StoreOptions{ + RunRoot: run, + GraphRoot: root, + GraphDriverName: "vfs", + GraphDriverOptions: []string{}, + UidMap: uidmap, + GidMap: gidmap, + }) + if err != nil { + t.Fatal(err) + } + Transport.SetStore(store) + return store +} + +func TestParse(t *testing.T) { + store := newStore(t) + + ref, err := Transport.ParseReference("test") + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", "test", err) + } + if ref == nil { + t.Fatalf("ParseReference returned nil reference") + } + + ref, err = Transport.ParseStoreReference(store, "test") + if err != nil { + t.Fatalf("ParseStoreReference(%q) returned error %v", "test", err) + } + if ref == nil { + t.Fatalf("ParseStoreReference(%q) returned nil reference", "test") + } + + strRef := ref.StringWithinTransport() + ref, err = Transport.ParseReference(strRef) + if err != nil { + t.Fatalf("ParseReference(%q) returned error: %v", strRef, err) + } + if ref == nil { + t.Fatalf("ParseReference(%q) returned nil reference", strRef) + } + + transport := storageTransport{ + store: store, + } + _references := []storageReference{ + { + name: ref.(*storageReference).name, + reference: verboseName(ref.(*storageReference).name), + id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + transport: transport, + }, + { + name: ref.(*storageReference).name, + reference: verboseName(ref.(*storageReference).name), + transport: transport, + }, + { + id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + transport: transport, + }, + { + name: ref.DockerReference(), + reference: verboseName(ref.DockerReference()), + transport: transport, + }, + } + for _, reference := range _references { + s := reference.StringWithinTransport() + ref, err := Transport.ParseStoreReference(store, s) + if err != nil { + t.Fatalf("ParseReference(%q) returned error: %v", strRef, err) + } + if ref.id != reference.id { + t.Fatalf("ParseReference(%q) failed to extract ID", s) + } + if ref.reference != reference.reference { + t.Fatalf("ParseReference(%q) failed to extract reference (%q!=%q)", s, ref.reference, reference.reference) + } + } +} + +func systemContext() *types.SystemContext { + return &types.SystemContext{} +} + +func makeLayer(t *testing.T, compression archive.Compression) (ddigest.Digest, int64, int64, []byte) { + var cwriter io.WriteCloser + var uncompressed *ioutils.WriteCounter + var twriter *tar.Writer + preader, pwriter := io.Pipe() + tbuffer := bytes.Buffer{} + if compression != archive.Uncompressed { + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + t.Fatalf("Error compressing layer: %v", err) + } + cwriter = compressor + uncompressed = ioutils.NewWriteCounter(cwriter) 
+ } else { + uncompressed = ioutils.NewWriteCounter(pwriter) + } + twriter = tar.NewWriter(uncompressed) + buf := make([]byte, layerSize) + n, err := rand.Read(buf) + if err != nil { + t.Fatalf("Error reading tar data: %v", err) + } + if n != len(buf) { + t.Fatalf("Short read reading tar data: %d < %d", n, len(buf)) + } + for i := 1024; i < 2048; i++ { + buf[i] = 0 + } + go func() { + defer pwriter.Close() + if cwriter != nil { + defer cwriter.Close() + } + defer twriter.Close() + err := twriter.WriteHeader(&tar.Header{ + Name: "/random-single-file", + Mode: 0600, + Size: int64(len(buf)), + ModTime: time.Now(), + AccessTime: time.Now(), + ChangeTime: time.Now(), + Typeflag: tar.TypeReg, + }) + if err != nil { + t.Fatalf("Error writing tar header: %v", err) + } + n, err := twriter.Write(buf) + if err != nil { + t.Fatalf("Error writing tar header: %v", err) + } + if n != len(buf) { + t.Fatalf("Short write writing tar header: %d < %d", n, len(buf)) + } + }() + _, err = io.Copy(&tbuffer, preader) + if err != nil { + t.Fatalf("Error reading layer tar: %v", err) + } + sum := ddigest.SHA256.FromBytes(tbuffer.Bytes()) + return sum, uncompressed.Count, int64(tbuffer.Len()), tbuffer.Bytes() +} + +func TestWriteRead(t *testing.T) { + if os.Geteuid() != 0 { + t.Skip("TestWriteRead requires root privileges") + } + + config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` + sum := ddigest.SHA256.FromBytes([]byte(config)) + configInfo := types.BlobInfo{ + Digest: sum, + Size: int64(len(config)), + } + manifests := []string{ + //`{ + // "schemaVersion": 2, + // "mediaType": "application/vnd.oci.image.manifest.v1+json", + // "config": { + // "mediaType": "application/vnd.oci.image.serialization.config.v1+json", + // "size": %cs, + // "digest": "%ch" + // }, + // "layers": [ + // { + // "mediaType": "application/vnd.oci.image.serialization.rootfs.tar.gzip", + // "digest": "%lh", + // "size": %ls + // } + // ] + //}`, + `{ + "schemaVersion": 1, + "name": "test", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "%lh" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"%li\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":%ls}" + } + ] + }`, + `{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": %cs, + "digest": "%ch" + }, + "layers": [ + { + "mediaType": 
"application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "%lh", + "size": %ls + } + ] + }`, + } + signatures := [][]byte{ + []byte("Signature A"), + []byte("Signature B"), + } + newStore(t) + ref, err := Transport.ParseReference("test") + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", "test", err) + } + if ref == nil { + t.Fatalf("ParseReference returned nil reference") + } + + for _, manifestFmt := range manifests { + dest, err := ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) + } + if dest.Reference().StringWithinTransport() != ref.StringWithinTransport() { + t.Fatalf("NewImageDestination(%q) changed the reference to %q", ref.StringWithinTransport(), dest.Reference().StringWithinTransport()) + } + t.Logf("supported manifest MIME types: %v", dest.SupportedManifestMIMETypes()) + if err := dest.SupportsSignatures(); err != nil { + t.Fatalf("Destination image doesn't support signatures: %v", err) + } + t.Logf("compress layers: %v", dest.ShouldCompressLayers()) + compression := archive.Uncompressed + if dest.ShouldCompressLayers() { + compression = archive.Gzip + } + digest, decompressedSize, size, blob := makeLayer(t, compression) + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: size, + Digest: digest, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer to destination: %v", err) + } + t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", digest, size, decompressedSize) + if _, err := dest.PutBlob(bytes.NewBufferString(config), configInfo); err != nil { + t.Fatalf("Error saving config to destination: %v", err) + } + manifest := strings.Replace(manifestFmt, "%lh", digest.String(), -1) + manifest = strings.Replace(manifest, "%ch", configInfo.Digest.String(), -1) + manifest = strings.Replace(manifest, "%ls", fmt.Sprintf("%d", size), -1) + manifest = strings.Replace(manifest, "%cs", fmt.Sprintf("%d", configInfo.Size), -1) + li := digest.Hex() + manifest = strings.Replace(manifest, "%li", li, -1) + manifest = strings.Replace(manifest, "%ci", sum.Hex(), -1) + t.Logf("this manifest is %q", manifest) + if err := dest.PutManifest([]byte(manifest)); err != nil { + t.Fatalf("Error saving manifest to destination: %v", err) + } + if err := dest.PutSignatures(signatures); err != nil { + t.Fatalf("Error saving signatures to destination: %v", err) + } + if err := dest.Commit(); err != nil { + t.Fatalf("Error committing changes to destination: %v", err) + } + dest.Close() + + img, err := ref.NewImage(systemContext()) + if err != nil { + t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) + } + imageConfigInfo := img.ConfigInfo() + if imageConfigInfo.Digest != "" { + blob, err := img.ConfigBlob() + if err != nil { + t.Fatalf("image %q claimed there was a config blob, but couldn't produce it: %v", ref.StringWithinTransport(), err) + } + sum := ddigest.SHA256.FromBytes(blob) + if sum != configInfo.Digest { + t.Fatalf("image config blob digest for %q doesn't match", ref.StringWithinTransport()) + } + if int64(len(blob)) != configInfo.Size { + t.Fatalf("image config size for %q changed from %d to %d", ref.StringWithinTransport(), configInfo.Size, len(blob)) + } + } + layerInfos := img.LayerInfos() + if layerInfos == nil { + t.Fatalf("image for %q returned empty layer list", 
ref.StringWithinTransport()) + } + imageInfo, err := img.Inspect() + if err != nil { + t.Fatalf("Inspect(%q) returned error %v", ref.StringWithinTransport(), err) + } + if imageInfo.Created.IsZero() { + t.Fatalf("Image %q claims to have been created at time 0", ref.StringWithinTransport()) + } + + src, err := ref.NewImageSource(systemContext(), []string{}) + if err != nil { + t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) + } + if src == nil { + t.Fatalf("NewImageSource(%q) returned no source", ref.StringWithinTransport()) + } + if src.Reference().StringWithinTransport() != ref.StringWithinTransport() { + // As long as it's only the addition of an ID suffix, that's okay. + if !strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@") { + t.Fatalf("NewImageSource(%q) changed the reference to %q", ref.StringWithinTransport(), src.Reference().StringWithinTransport()) + } + } + retrievedManifest, manifestType, err := src.GetManifest() + if err != nil { + t.Fatalf("GetManifest(%q) returned error %v", ref.StringWithinTransport(), err) + } + t.Logf("this manifest's type appears to be %q", manifestType) + if string(retrievedManifest) != manifest { + t.Fatalf("NewImageSource(%q) changed the manifest: %q was %q", ref.StringWithinTransport(), string(retrievedManifest), manifest) + } + sum = ddigest.SHA256.FromBytes([]byte(manifest)) + _, _, err = src.GetTargetManifest(sum) + if err == nil { + t.Fatalf("GetTargetManifest(%q) is supposed to fail", ref.StringWithinTransport()) + } + sigs, err := src.GetSignatures() + if err != nil { + t.Fatalf("GetSignatures(%q) returned error %v", ref.StringWithinTransport(), err) + } + if len(sigs) < len(signatures) { + t.Fatalf("Lost %d signatures", len(signatures)-len(sigs)) + } + if len(sigs) > len(signatures) { + t.Fatalf("Gained %d signatures", len(sigs)-len(signatures)) + } + for i := range sigs { + if !bytes.Equal(sigs[i], signatures[i]) { + t.Fatalf("Signature %d was corrupted", i) + } + } + for _, layerInfo := range layerInfos { + buf := bytes.Buffer{} + layer, size, err := src.GetBlob(layerInfo) + if err != nil { + t.Fatalf("Error reading layer %q from %q: %v", layerInfo.Digest, ref.StringWithinTransport(), err) + } + t.Logf("Decompressing blob %q, blob size = %d, layerInfo.Size = %d bytes", layerInfo.Digest, size, layerInfo.Size) + hasher := sha256.New() + compressed := ioutils.NewWriteCounter(hasher) + countedLayer := io.TeeReader(layer, compressed) + decompressed, err := archive.DecompressStream(countedLayer) + if err != nil { + t.Fatalf("Error decompressing layer %q from %q: %v", layerInfo.Digest, ref.StringWithinTransport(), err) + } + n, err := io.Copy(&buf, decompressed) + if err != nil { + t.Fatalf("Error reading decompressed layer %q from %q: %v", layerInfo.Digest, ref.StringWithinTransport(), err) + } + if layerInfo.Size >= 0 && compressed.Count != layerInfo.Size { + t.Fatalf("Blob size is different than expected: %d != %d, read %d", compressed.Count, layerInfo.Size, n) + } + if size >= 0 && compressed.Count != size { + t.Fatalf("Blob size mismatch: %d != %d, read %d", compressed.Count, size, n) + } + sum := hasher.Sum(nil) + if ddigest.NewDigestFromBytes(ddigest.SHA256, sum) != layerInfo.Digest { + t.Fatalf("Layer blob digest for %q doesn't match", ref.StringWithinTransport()) + } + } + src.Close() + img.Close() + err = ref.DeleteImage(systemContext()) + if err != nil { + t.Fatalf("DeleteImage(%q) returned error %v", ref.StringWithinTransport(), err) + } + } +} + +func TestDuplicateName(t *testing.T) { + if os.Geteuid() != 0 { + t.Skip("TestDuplicateName requires root privileges") + } + + newStore(t) + + ref, err := 
Transport.ParseReference("test") + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", "test", err) + } + if ref == nil { + t.Fatalf("ParseReference returned nil reference") + } + + dest, err := ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) + } + digest, _, size, blob := makeLayer(t, archive.Uncompressed) + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: size, + Digest: digest, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) + } + if err := dest.Commit(); err != nil { + t.Fatalf("Error committing changes to destination, first pass: %v", err) + } + dest.Close() + + dest, err = ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) + } + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: int64(size), + Digest: digest, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) + } + if err := dest.Commit(); err != nil { + t.Fatalf("Error committing changes to destination, second pass: %v", err) + } + dest.Close() +} + +func TestDuplicateID(t *testing.T) { + if os.Geteuid() != 0 { + t.Skip("TestDuplicateID requires root privileges") + } + + newStore(t) + + ref, err := Transport.ParseReference("@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", "test", err) + } + if ref == nil { + t.Fatalf("ParseReference returned nil reference") + } + + dest, err := ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) + } + digest, _, size, blob := makeLayer(t, archive.Gzip) + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: size, + Digest: digest, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) + } + if err := dest.Commit(); err != nil { + t.Fatalf("Error committing changes to destination, first pass: %v", err) + } + dest.Close() + + dest, err = ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) + } + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: int64(size), + Digest: digest, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) + } + if err := dest.Commit(); err != storage.ErrDuplicateID { + if err != nil { + t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) + } + t.Fatalf("Incorrectly succeeded committing changes to destination, second pass: %v", err) + } + dest.Close() +} + +func TestDuplicateNameID(t *testing.T) { + if os.Geteuid() != 
0 { + t.Skip("TestDuplicateNameID requires root privileges") + } + + newStore(t) + + refString := "test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + ref, err := Transport.ParseReference(refString) + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", refString, err) + } + if ref == nil { + t.Fatalf("ParseReference returned nil reference") + } + + dest, err := ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) + } + digest, _, size, blob := makeLayer(t, archive.Gzip) + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: size, + Digest: digest, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) + } + if err := dest.Commit(); err != nil { + t.Fatalf("Error committing changes to destination, first pass: %v", err) + } + dest.Close() + + dest, err = ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) + } + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: size, + Digest: digest, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) + } + if err := dest.Commit(); err != storage.ErrDuplicateID { + if err != nil { + t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) + } + t.Fatalf("Incorrectly succeeded committing changes to destination, second pass: %v", err) + } + dest.Close() +} + +func TestNamespaces(t *testing.T) { + newStore(t) + + refString := "test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + ref, err := Transport.ParseReference(refString) + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", refString, err) + } + if ref == nil { + t.Fatalf("ParseReference returned nil reference") + } + + namespaces := ref.PolicyConfigurationNamespaces() + for _, namespace := range namespaces { + t.Logf("namespace: %q", namespace) + err = Transport.ValidatePolicyConfigurationScope(namespace) + if err != nil { + t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err) + } + } + namespace := ref.StringWithinTransport() + t.Logf("ref: %q", namespace) + err = Transport.ValidatePolicyConfigurationScope(namespace) + if err != nil { + t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err) + } + for _, namespace := range []string{ + "@beefee", + ":miracle", + ":miracle@beefee", + "@beefee:miracle", + } { + t.Logf("invalid ref: %q", namespace) + err = Transport.ValidatePolicyConfigurationScope(namespace) + if err == nil { + t.Fatalf("ValidatePolicyConfigurationScope(%q) should have failed", namespace) + } + } +} + +func TestSize(t *testing.T) { + if os.Geteuid() != 0 { + t.Skip("TestSize requires root privileges") + } + + config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` + sum := ddigest.SHA256.FromBytes([]byte(config)) + configInfo := types.BlobInfo{ + Digest: sum, + Size: int64(len(config)), + } + + newStore(t) + + ref, err := Transport.ParseReference("test") + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", "test", err) + } + if ref == nil { + 
t.Fatalf("ParseReference returned nil reference") + } + + dest, err := ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) + } + digest1, _, size1, blob := makeLayer(t, archive.Gzip) + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: size1, + Digest: digest1, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer 1 to destination: %v", err) + } + digest2, _, size2, blob := makeLayer(t, archive.Gzip) + if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{ + Size: size2, + Digest: digest2, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer 2 to destination: %v", err) + } + manifest := fmt.Sprintf(` + { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": %d, + "digest": "%s" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "%s", + "size": %d + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "%s", + "size": %d + } + ] + } + `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2) + if err := dest.PutManifest([]byte(manifest)); err != nil { + t.Fatalf("Error storing manifest to destination: %v", err) + } + if err := dest.Commit(); err != nil { + t.Fatalf("Error committing changes to destination: %v", err) + } + dest.Close() + + img, err := ref.NewImage(systemContext()) + if err != nil { + t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) + } + usize, err := img.Size() + if usize == -1 || err != nil { + t.Fatalf("Error calculating image size: %v", err) + } + if int(usize) != layerSize*2+len(manifest) { + t.Fatalf("Unexpected image size: %d != %d + %d + %d", usize, layerSize, layerSize, len(manifest)) + } + img.Close() +} + +func TestDuplicateBlob(t *testing.T) { + if os.Geteuid() != 0 { + t.Skip("TestDuplicateBlob requires root privileges") + } + + config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` + sum := ddigest.SHA256.FromBytes([]byte(config)) + configInfo := types.BlobInfo{ + Digest: sum, + Size: int64(len(config)), + } + + newStore(t) + + ref, err := Transport.ParseReference("test") + if err != nil { + t.Fatalf("ParseReference(%q) returned error %v", "test", err) + } + if ref == nil { + t.Fatalf("ParseReference returned nil reference") + } + + dest, err := ref.NewImageDestination(systemContext()) + if err != nil { + t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) + } + if dest == nil { + t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) + } + digest1, _, size1, blob1 := makeLayer(t, archive.Gzip) + if _, err := dest.PutBlob(bytes.NewBuffer(blob1), types.BlobInfo{ + Size: size1, + Digest: digest1, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer 1 to destination (first copy): %v", err) + } + digest2, _, size2, blob2 := makeLayer(t, archive.Gzip) + if _, err := dest.PutBlob(bytes.NewBuffer(blob2), types.BlobInfo{ + Size: size2, + Digest: digest2, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer 2 to destination (first copy): %v", err) + } + if _, err := dest.PutBlob(bytes.NewBuffer(blob1), types.BlobInfo{ + Size: size1, + 
Digest: digest1, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer 1 to destination (second copy): %v", err) + } + if _, err := dest.PutBlob(bytes.NewBuffer(blob2), types.BlobInfo{ + Size: size2, + Digest: digest2, + }); err != nil { + t.Fatalf("Error saving randomly-generated layer 2 to destination (second copy): %v", err) + } + manifest := fmt.Sprintf(` + { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": %d, + "digest": "%s" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "%s", + "size": %d + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "%s", + "size": %d + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "%s", + "size": %d + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "digest": "%s", + "size": %d + } + ] + } + `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2, digest1, size1, digest2, size2) + if err := dest.PutManifest([]byte(manifest)); err != nil { + t.Fatalf("Error storing manifest to destination: %v", err) + } + if err := dest.Commit(); err != nil { + t.Fatalf("Error committing changes to destination: %v", err) + } + dest.Close() + + img, err := ref.NewImage(systemContext()) + if err != nil { + t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) + } + src, err := ref.NewImageSource(systemContext(), nil) + if err != nil { + t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) + } + source, ok := src.(*storageImageSource) + if !ok { + t.Fatalf("ImageSource is not a storage image") + } + layers := []string{} + for _, layerInfo := range img.LayerInfos() { + rc, _, layerID, err := source.getBlobAndLayerID(layerInfo) + if err != nil { + t.Fatalf("getBlobAndLayerID(%q) returned error %v", layerInfo.Digest, err) + } + if _, err := io.Copy(ioutil.Discard, rc); err != nil { + t.Fatalf("Error reading blob %q: %v", layerInfo.Digest, err) + } + rc.Close() + layers = append(layers, layerID) + } + if len(layers) != 4 { + t.Fatalf("Incorrect number of layers: %d", len(layers)) + } + for i, layerID := range layers { + for j, otherID := range layers { + if i != j && layerID == otherID { + t.Fatalf("Layer IDs are not unique: %v", layers) + } + } + } + src.Close() + img.Close() +} diff --git a/storage/storage_transport.go b/storage/storage_transport.go new file mode 100644 index 0000000000..71056ff183 --- /dev/null +++ b/storage/storage_transport.go @@ -0,0 +1,285 @@ +package storage + +import ( + "errors" + "path/filepath" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/containers/storage/storage" + "github.com/docker/distribution/digest" + ddigest "github.com/docker/distribution/digest" +) + +var ( + // Transport is an ImageTransport that uses either a default + // storage.Store or one that it's explicitly told to use. + Transport StoreTransport = &storageTransport{} + // ErrInvalidReference is returned when ParseReference() is passed an + // empty reference. + ErrInvalidReference = errors.New("invalid reference") + // ErrPathNotAbsolute is returned when a graph root is not an absolute + // path name. 
+ ErrPathNotAbsolute = errors.New("path name is not absolute") + idRegexp = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$") +) + +// StoreTransport is an ImageTransport that uses a storage.Store to parse +// references, either its own default or one that it's told to use. +type StoreTransport interface { + types.ImageTransport + // SetStore sets the default store for this transport. + SetStore(storage.Store) + // GetImage retrieves the image from the transport's store that's named + // by the reference. + GetImage(types.ImageReference) (*storage.Image, error) + // GetStoreImage retrieves the image from a specified store that's named + // by the reference. + GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) + // ParseStoreReference parses a reference, overriding any store + // specification that it may contain. + ParseStoreReference(store storage.Store, reference string) (*storageReference, error) +} + +type storageTransport struct { + store storage.Store +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. + return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + var name reference.Named + var sum digest.Digest + var err error + if ref == "" { + return nil, ErrInvalidReference + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + ref = ref[closeIndex+1:] + } + refInfo := strings.SplitN(ref, "@", 2) + if len(refInfo) == 1 { + // A name. + name, err = reference.ParseNamed(refInfo[0]) + if err != nil { + return nil, err + } + } else if len(refInfo) == 2 { + // An ID, possibly preceded by a name. + if refInfo[0] != "" { + name, err = reference.ParseNamed(refInfo[0]) + if err != nil { + return nil, err + } + } + sum, err = digest.ParseDigest("sha256:" + refInfo[1]) + if err != nil { + return nil, err + } + } else { // Coverage: len(refInfo) is always 1 or 2 + // Anything else: store specified in a form we don't + // recognize. + return nil, ErrInvalidReference + } + storeSpec := "[" + store.GetGraphDriverName() + "@" + store.GetGraphRoot() + "]" + id := "" + if sum.Validate() == nil { + id = sum.Hex() + } + refname := "" + if name != nil { + name = reference.WithDefaultTag(name) + refname = verboseName(name) + } + if refname == "" { + logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) + } else if id == "" { + logrus.Debugf("parsed reference into %q", storeSpec+refname) + } else { + logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) + } + return newReference(storageTransport{store: store}, refname, id, name), nil +} + +func (s *storageTransport) GetStore() (storage.Store, error) { + // Return the transport's previously-set store. If we don't have one + // of those, initialize one now. 
+ if s.store == nil { + store, err := storage.GetStore(storage.DefaultStoreOptions) + if err != nil { + return nil, err + } + s.store = store + } + return s.store, nil +} + +// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// possibly prefixed with a store specifier in the form "[_graphroot_]" or +// "[_driver_@_graphroot_]", tries to figure out which it is, and returns it in +// a reference object. If the _graphroot_ is a location other than the default, +// it needs to have been previously opened using storage.GetStore(), so that it +// can figure out which run root goes with the graph root. +func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + // Check if there's a store location prefix. If there is, then it + // needs to match a store that was previously initialized using + // storage.GetStore(), or be enough to let the storage library fill out + // the rest using knowledge that it has from elsewhere. + if reference[0] == '[' { + closeIndex := strings.IndexRune(reference, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + storeSpec := reference[1:closeIndex] + reference = reference[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphRoot: storeInfo[0], + }) + if err != nil { + return nil, err + } + store = store2 + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphDriverName: storeInfo[0], + GraphRoot: storeInfo[1], + }) + if err != nil { + return nil, err + } + store = store2 + } else { + // Anything else: store specified in a form we don't + // recognize. + return nil, ErrInvalidReference + } + } + return s.ParseStoreReference(store, reference) +} + +func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { + dref := ref.DockerReference() + if dref == nil { + if sref, ok := ref.(*storageReference); ok { + if sref.id != "" { + if img, err := store.GetImage(sref.id); err == nil { + return img, nil + } + } + } + return nil, ErrInvalidReference + } + return store.GetImage(verboseName(dref)) +} + +func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + return s.GetStoreImage(store, ref) +} + +func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { + // Check that there's a store location prefix. Values we're passed are + // expected to come from PolicyConfigurationIdentity or + // PolicyConfigurationNamespaces, so if there's no store location, + // something's wrong. + if scope[0] != '[' { + return ErrInvalidReference + } + // Parse the store location prefix. + closeIndex := strings.IndexRune(scope, ']') + if closeIndex < 1 { + return ErrInvalidReference + } + storeSpec := scope[1:closeIndex] + scope = scope[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. 
+ if !filepath.IsAbs(storeInfo[0]) { + return ErrPathNotAbsolute + } + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return ErrPathNotAbsolute + } + } else { + // Anything else: store specified in a form we don't + // recognize. + return ErrInvalidReference + } + // That might be all of it, and that's okay. + if scope == "" { + return nil + } + // But if there is anything left, it has to be a name, with or without + // a tag, with or without an ID, since we don't return namespace values + // that are just bare IDs. + scopeInfo := strings.SplitN(scope, "@", 2) + if len(scopeInfo) == 1 && scopeInfo[0] != "" { + _, err := reference.ParseNamed(scopeInfo[0]) + if err != nil { + return err + } + } else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" { + _, err := reference.ParseNamed(scopeInfo[0]) + if err != nil { + return err + } + _, err = ddigest.ParseDigest("sha256:" + scopeInfo[1]) + if err != nil { + return err + } + } else { + return ErrInvalidReference + } + return nil +} + +func verboseName(name reference.Named) string { + name = reference.WithDefaultTag(name) + tag := "" + if tagged, ok := name.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + return name.FullName() + ":" + tag +} diff --git a/storage/storage_transport_test.go b/storage/storage_transport_test.go new file mode 100644 index 0000000000..2ca7a657e2 --- /dev/null +++ b/storage/storage_transport_test.go @@ -0,0 +1,146 @@ +package storage + +import ( + "fmt" + "testing" + + "github.com/containers/image/docker/reference" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" +) + +func TestTransportName(t *testing.T) { + assert.Equal(t, "containers-storage", Transport.Name()) +} + +func TestTransportParseStoreReference(t *testing.T) { + for _, c := range []struct{ input, expectedRef, expectedID string }{ + {"", "", ""}, // Empty input + // Handling of the store prefix + // FIXME? Should we be silently discarding input like this? 
+ {"[unterminated", "", ""}, // Unterminated store specifier + {"[garbage]busybox", "docker.io/library/busybox:latest", ""}, // Store specifier is overridden by the store we pass to ParseStoreReference + + {"UPPERCASEISINVALID", "", ""}, // Invalid single-component name + {"sha256:" + sha256digestHex, "docker.io/library/sha256:" + sha256digestHex, ""}, // Valid single-component name; the hex part is not an ID unless it has a "@" prefix + {sha256digestHex, "", ""}, // Invalid single-component ID; not an ID without a "@" prefix, so it's parsed as a name, but names aren't allowed to look like IDs + {"@" + sha256digestHex, "", sha256digestHex}, // Valid single-component ID + {"sha256:ab", "docker.io/library/sha256:ab", ""}, // Valid single-component name, explicit tag + {"busybox", "docker.io/library/busybox:latest", ""}, // Valid single-component name, implicit tag + {"busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, explicit tag + {"docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, everything explicit + + {"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@ID + {"busybox@ab", "", ""}, // Invalid ID in name@ID + {"busybox@", "", ""}, // Empty ID in name@ID + {"busybox@sha256:" + sha256digestHex, "", ""}, // This (a digested docker/docker reference format) is also invalid, since it's an invalid ID in name@ID + {"@" + sha256digestHex, "", sha256digestHex}, // Valid two-component name, with ID only + {"busybox@" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid two-component name, implicit tag + {"busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid two-component name, explicit tag + {"docker.io/library/busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid two-component name, everything explicit + } { + storageRef, err := Transport.ParseStoreReference(Transport.(*storageTransport).store, c.input) + if c.expectedRef == "" && c.expectedID == "" { + assert.Error(t, err, c.input) + } else { + require.NoError(t, err, c.input) + assert.Equal(t, *(Transport.(*storageTransport)), storageRef.transport, c.input) + assert.Equal(t, c.expectedRef, storageRef.reference, c.input) + assert.Equal(t, c.expectedID, storageRef.id, c.input) + if c.expectedRef == "" { + assert.Nil(t, storageRef.name, c.input) + } else { + dockerRef, err := reference.ParseNamed(c.expectedRef) + require.NoError(t, err) + require.NotNil(t, storageRef.name, c.input) + assert.Equal(t, dockerRef.String(), storageRef.name.String()) + } + } + } +} + +func TestTransportParseReference(t *testing.T) { + store := newStore(t) + driver := store.GetGraphDriverName() + root := store.GetGraphRoot() + + for _, c := range []struct{ prefix, expectedDriver, expectedRoot string }{ + {"", driver, root}, // Implicit store location prefix + {"[unterminated", "", ""}, // Unterminated store specifier + {"[]", "", ""}, // Empty store specifier + {"[relative/path]", "", ""}, // Non-absolute graph root path + {"[" + driver + "@relative/path]", "", ""}, // Non-absolute graph root path + {"[thisisunknown@" + root + "suffix2]", "", ""}, // Unknown graph driver + + // The next two could be valid, but aren't enough to allow GetStore() to locate a matching + // store, since the reference can't specify a RunRoot. 
Without one, GetStore() tries to + // match the GraphRoot (possibly combined with the driver name) against a Store that was + // previously opened using GetStore(), and we haven't done that. + // Future versions of the storage library will probably make this easier for locations that + // are shared, by caching the rest of the information inside the graph root so that it can + // be looked up later, but since this is a per-test temporary location, that won't help here. + //{"[" + root + "suffix1]", driver, root + "suffix1"}, // A valid root path + //{"[" + driver + "@" + root + "suffix3]", driver, root + "suffix3"}, // A valid root@graph pair + } { + ref, err := Transport.ParseReference(c.prefix + "busybox") + if c.expectedDriver == "" { + assert.Error(t, err, c.prefix) + } else { + require.NoError(t, err, c.prefix) + storageRef, ok := ref.(*storageReference) + require.True(t, ok, c.prefix) + assert.Equal(t, c.expectedDriver, storageRef.transport.store.GetGraphDriverName(), c.prefix) + assert.Equal(t, c.expectedRoot, storageRef.transport.store.GetGraphRoot(), c.prefix) + } + } +} + +func TestTransportValidatePolicyConfigurationScope(t *testing.T) { + store := newStore(t) + driver := store.GetGraphDriverName() + root := store.GetGraphRoot() + storeSpec := fmt.Sprintf("[%s@%s]", driver, root) // As computed in PolicyConfigurationNamespaces + + // Valid inputs + for _, scope := range []string{ + "[" + root + "suffix1]", // driverlessStoreSpec in PolicyConfigurationNamespaces + "[" + driver + "@" + root + "suffix3]", // storeSpec in PolicyConfigurationNamespaces + storeSpec + "sha256:ab", // Valid single-component name, explicit tag + storeSpec + "sha256:" + sha256digestHex, // Valid single-component ID with a longer explicit tag + storeSpec + "busybox", // Valid single-component name, implicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox + storeSpec + "busybox:notlatest", // Valid single-component name, explicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox + storeSpec + "docker.io/library/busybox:notlatest", // Valid single-component name, everything explicit + storeSpec + "busybox@" + sha256digestHex, // Valid two-component name, implicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox (and never match) + storeSpec + "busybox:notlatest@" + sha256digestHex, // Valid two-component name, explicit tag; NOTE that this non-canonical form would be interpreted as a scope for host busybox (and never match) + storeSpec + "docker.io/library/busybox:notlatest@" + sha256digestHex, // Valid two-component name, everything explicit + } { + err := Transport.ValidatePolicyConfigurationScope(scope) + assert.NoError(t, err, scope) + } + + // Invalid inputs + for _, scope := range []string{ + "busybox", // Unprefixed reference + "[unterminated", // Unterminated store specifier + "[]", // Empty store specifier + "[relative/path]", // Non-absolute graph root path + "[" + driver + "@relative/path]", // Non-absolute graph root path + // "[thisisunknown@" + root + "suffix2]", // Unknown graph driver FIXME: validate against storage.ListGraphDrivers() once that's available + storeSpec + sha256digestHex, // Almost a valid single-component name, but rejected because it looks like an ID that's missing its "@" prefix + storeSpec + "@", // An incomplete two-component name + storeSpec + "@" + sha256digestHex, // A valid two-component name, but ID-only, so not a valid scope + + storeSpec + 
"UPPERCASEISINVALID", // Invalid single-component name + storeSpec + "UPPERCASEISINVALID@" + sha256digestHex, // Invalid name in name@ID + storeSpec + "busybox@ab", // Invalid ID in name@ID + storeSpec + "busybox@", // Empty ID in name@ID + storeSpec + "busybox@sha256:" + sha256digestHex, // This (in a digested docker/docker reference format) is also invalid; this can't actually be matched by a storageReference.PolicyConfigurationIdentity, so it should be rejected + } { + err := Transport.ValidatePolicyConfigurationScope(scope) + assert.Error(t, err, scope) + } +} diff --git a/transports/transports.go b/transports/transports.go index 976135177c..d4141df0c6 100644 --- a/transports/transports.go +++ b/transports/transports.go @@ -9,6 +9,7 @@ import ( "github.com/containers/image/docker/daemon" ociLayout "github.com/containers/image/oci/layout" "github.com/containers/image/openshift" + "github.com/containers/image/storage" "github.com/containers/image/types" ) @@ -25,6 +26,7 @@ func init() { daemon.Transport, ociLayout.Transport, openshift.Transport, + storage.Transport, } { name := t.Name() if _, ok := KnownTransports[name]; ok { diff --git a/types/types.go b/types/types.go index 9fd1de5e4b..a05a7064c1 100644 --- a/types/types.go +++ b/types/types.go @@ -1,6 +1,7 @@ package types import ( + "errors" "io" "time" @@ -127,6 +128,7 @@ type ImageSource interface { // // There is a specific required order for some of the calls: // PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) +// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest // PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) // Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. // @@ -158,6 +160,10 @@ type ImageDestination interface { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error) + // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. A false result will often be accompanied by an ErrBlobNotFound error. + HasBlob(info BlobInfo) (bool, int64, error) + // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree. + ReapplyBlob(info BlobInfo) (BlobInfo, error) // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. PutManifest([]byte) error PutSignatures(signatures [][]byte) error @@ -212,6 +218,9 @@ type Image interface { UpdatedImage(options ManifestUpdateOptions) (Image, error) // IsMultiImage returns true if the image's manifest is a list of images, false otherwise. IsMultiImage() bool + // Size returns an approximation of the amount of disk space which is consumed by the image in its current + // location. 
If the size is not known, -1 will be returned. + Size() (int64, error) } // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest @@ -284,3 +293,8 @@ type SystemContext struct { // in order to not break any existing docker's integration tests. DockerDisableV1Ping bool } + +var ( + // ErrBlobNotFound can be returned by an ImageDestination's HasBlob() method + ErrBlobNotFound = errors.New("no such blob present") +)
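Reviewer note: the ImageDestination documentation above pins down a strict call order — PutBlob for every blob before PutManifest, ReapplyBlob only after HasBlob has returned true for the same digest, PutSignatures after PutManifest, and Commit last. The sketch below shows a caller honoring that contract; it is illustrative only, and pushLayers, destRef, blobs, streams, manifestBytes, and sigs are hypothetical names, not part of this change.

package example

import (
	"io"

	"github.com/containers/image/types"
)

// pushLayers is a hypothetical caller demonstrating the documented call
// order, using HasBlob/ReapplyBlob to avoid re-uploading blobs that the
// destination already holds.
func pushLayers(destRef types.ImageReference, blobs []types.BlobInfo, streams []io.Reader, manifestBytes []byte, sigs [][]byte) error {
	dest, err := destRef.NewImageDestination(&types.SystemContext{})
	if err != nil {
		return err
	}
	defer dest.Close()
	for i, info := range blobs {
		// HasBlob requires a non-empty digest; a false result may be
		// accompanied by types.ErrBlobNotFound, which is not fatal here.
		present, _, err := dest.HasBlob(info)
		if err != nil && err != types.ErrBlobNotFound {
			return err
		}
		if present {
			// ReapplyBlob MUST only be called after HasBlob returned true.
			if _, err := dest.ReapplyBlob(info); err != nil {
				return err
			}
			continue
		}
		if _, err := dest.PutBlob(streams[i], info); err != nil {
			return err
		}
	}
	// Manifest after all blobs, signatures after the manifest, Commit last.
	if err := dest.PutManifest(manifestBytes); err != nil {
		return err
	}
	if err := dest.PutSignatures(sigs); err != nil {
		return err
	}
	return dest.Commit()
}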