diff --git a/copy/copy.go b/copy/copy.go
new file mode 100644
index 0000000000..0a2241fb83
--- /dev/null
+++ b/copy/copy.go
@@ -0,0 +1,165 @@
+package copy
+
+import (
+	"crypto/sha256"
+	"crypto/subtle"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+	"strings"
+
+	"github.com/containers/image/image"
+	"github.com/containers/image/signature"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+)
+
+// supportedDigests lists the supported blob digest types.
+var supportedDigests = map[string]func() hash.Hash{
+	"sha256": sha256.New,
+}
+
+type digestingReader struct {
+	source           io.Reader
+	digest           hash.Hash
+	expectedDigest   []byte
+	failureIndicator *bool
+}
+
+// newDigestingReader returns an io.Reader with contents of source, which will eventually return a non-EOF error
+// and set *failureIndicator to true if the source stream does not match expectedDigestString.
+func newDigestingReader(source io.Reader, expectedDigestString string, failureIndicator *bool) (io.Reader, error) {
+	fields := strings.SplitN(expectedDigestString, ":", 2)
+	if len(fields) != 2 {
+		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString)
+	}
+	fn, ok := supportedDigests[fields[0]]
+	if !ok {
+		return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0])
+	}
+	digest := fn()
+	expectedDigest, err := hex.DecodeString(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err)
+	}
+	if len(expectedDigest) != digest.Size() {
+		return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size())
+	}
+	return &digestingReader{
+		source:           source,
+		digest:           digest,
+		expectedDigest:   expectedDigest,
+		failureIndicator: failureIndicator,
+	}, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+	n, err := d.source.Read(p)
+	if n > 0 {
+		if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil {
+			// Coverage: This should not happen, the hash.Hash interface requires
+			// d.digest.Write to never return an error, and the io.Writer interface
+			// requires n2 == len(input) if no error is returned.
+			return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err)
+		}
+	}
+	if err == io.EOF {
+		actualDigest := d.digest.Sum(nil)
+		if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 {
+			*d.failureIndicator = true
+			return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest))
+		}
+	}
+	return n, err
+}
+
+// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct {
+	SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+}
+
+// Image copies image from srcRef to destRef, using policyContext to validate source image admissibility.
+func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) error {
+	dest, err := destRef.NewImageDestination(ctx)
+	if err != nil {
+		return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err)
+	}
+	rawSource, err := srcRef.NewImageSource(ctx, dest.SupportedManifestMIMETypes())
+	if err != nil {
+		return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err)
+	}
+	src := image.FromSource(rawSource)
+
+	// Please keep this policy check BEFORE reading any other information about the image.
+	if allowed, err := policyContext.IsRunningImageAllowed(src); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+		return fmt.Errorf("Source image rejected: %v", err)
+	}
+
+	manifest, _, err := src.Manifest()
+	if err != nil {
+		return fmt.Errorf("Error reading manifest: %v", err)
+	}
+
+	sigs, err := src.Signatures()
+	if err != nil {
+		return fmt.Errorf("Error reading signatures: %v", err)
+	}
+
+	blobDigests, err := src.BlobDigests()
+	if err != nil {
+		return fmt.Errorf("Error parsing manifest: %v", err)
+	}
+	for _, digest := range blobDigests {
+		// TODO(mitr): do not ignore the size param returned here
+		stream, _, err := rawSource.GetBlob(digest)
+		if err != nil {
+			return fmt.Errorf("Error reading blob %s: %v", digest, err)
+		}
+		defer stream.Close()
+
+		// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+		// use a separate validation failure indicator.
+		// Note that we don't use a stronger "validationSucceeded" indicator, because
+		// dest.PutBlob may detect that the layer already exists, in which case we don't
+		// read stream to the end, and validation does not happen.
+		validationFailed := false // This is a new instance on each loop iteration.
+		digestingReader, err := newDigestingReader(stream, digest, &validationFailed)
+		if err != nil {
+			return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
+		}
+		if err := dest.PutBlob(digest, digestingReader); err != nil {
+			return fmt.Errorf("Error writing blob: %v", err)
+		}
+		if validationFailed { // Coverage: This should never happen.
+			return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
+		}
+	}
+
+	if options != nil && options.SignBy != "" {
+		mech, err := signature.NewGPGSigningMechanism()
+		if err != nil {
+			return fmt.Errorf("Error initializing GPG: %v", err)
+		}
+		dockerReference := dest.Reference().DockerReference()
+		if dockerReference == nil {
+			return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
+		}
+
+		newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy)
+		if err != nil {
+			return fmt.Errorf("Error creating signature: %v", err)
+		}
+		sigs = append(sigs, newSig)
+	}
+
+	// FIXME: We need to call PutManifest after PutBlob and before PutSignatures. This seems ugly; move to a "set properties" + "commit" model?
+	if err := dest.PutManifest(manifest); err != nil {
+		return fmt.Errorf("Error writing manifest: %v", err)
+	}
+
+	if err := dest.PutSignatures(sigs); err != nil {
+		return fmt.Errorf("Error writing signatures: %v", err)
+	}
+	return nil
+}
diff --git a/copy/copy_test.go b/copy/copy_test.go
new file mode 100644
index 0000000000..0a0fd6793d
--- /dev/null
+++ b/copy/copy_test.go
@@ -0,0 +1,62 @@
+package copy
+
+import (
+	"bytes"
+	"io"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewDigestingReader(t *testing.T) {
+	// Only the failure cases, success is tested in TestDigestingReaderRead below.
+	source := bytes.NewReader([]byte("abc"))
+	for _, input := range []string{
+		"abc",             // Not algo:hexvalue
+		"crc32:",          // Unknown algorithm, empty value
+		"crc32:012345678", // Unknown algorithm
+		"sha256:",         // Empty value
+		"sha256:0",        // Invalid hex value
+		"sha256:01",       // Invalid length of hex value
+	} {
+		validationFailed := false
+		_, err := newDigestingReader(source, input, &validationFailed)
+		assert.Error(t, err, input)
+	}
+}
+
+func TestDigestingReaderRead(t *testing.T) {
+	cases := []struct {
+		input  []byte
+		digest string
+	}{
+		{[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+		{[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"},
+		{make([]byte, 65537, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"},
+	}
+	// Valid input
+	for _, c := range cases {
+		source := bytes.NewReader(c.input)
+		validationFailed := false
+		reader, err := newDigestingReader(source, c.digest, &validationFailed)
+		require.NoError(t, err, c.digest)
+		dest := bytes.Buffer{}
+		n, err := io.Copy(&dest, reader)
+		assert.NoError(t, err, c.digest)
+		assert.Equal(t, int64(len(c.input)), n, c.digest)
+		assert.Equal(t, c.input, dest.Bytes(), c.digest)
+		assert.False(t, validationFailed, c.digest)
+	}
+	// Modified input
+	for _, c := range cases {
+		source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil))
+		validationFailed := false
+		reader, err := newDigestingReader(source, c.digest, &validationFailed)
+		require.NoError(t, err, c.digest)
+		dest := bytes.Buffer{}
+		_, err = io.Copy(&dest, reader)
+		assert.Error(t, err, c.digest)
+		assert.True(t, validationFailed)
+	}
+}