4 changes: 4 additions & 0 deletions image/copy/digesting_reader_test.go
@@ -34,6 +34,10 @@ func TestDigestingReaderRead(t *testing.T) {
{[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
{[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"},
{make([]byte, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"},
// SHA512 test cases
{[]byte(""), "sha512:cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"},
{[]byte("abc"), "sha512:ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"},
{make([]byte, 65537), "sha512:490821004e5a6025fe335a11f6c27b0f73cae0434bd9d2e5ac7aee3370bd421718cad7d8fbfd5f39153b6ca3b05faede68f5d6e462eeaf143bb034791ceb72ab"},
}
// Valid input
for _, c := range cases {
11 changes: 10 additions & 1 deletion image/copy/single.go
@@ -28,6 +28,7 @@ import (
"go.podman.io/image/v5/transports"
"go.podman.io/image/v5/types"
chunkedToc "go.podman.io/storage/pkg/chunked/toc"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)

// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -977,7 +978,15 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
}

// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
// This is a wrapper around computeDiffIDWithAlgorithm that uses the globally configured digest algorithm.
Review comment from @mtrmac (Contributor), Oct 24, 2025:

Please document it the other way — this function does not need to be documented as a wrapper, callers don’t care. On the contrary, computeDiffIDWithAlgorithm should say “this is an internal implementation detail of computeDiffID, and exists only to allow testing it …” so that no-one uses it.
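
A rough sketch of the flipped documentation this comment asks for (the wording is mine, not from the PR):

// computeDiffID reads all input from layerStream, uncompresses it using decompressor if
// necessary, and returns its digest using the globally configured digest algorithm.

// computeDiffIDWithAlgorithm is an internal implementation detail of computeDiffID, and exists
// only to allow testing it with an explicit algorithm; no other caller should use it.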

func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
algorithm := supportedDigests.TmpDigestForNewObjects()
return computeDiffIDWithAlgorithm(stream, decompressor, algorithm)
}

// computeDiffIDWithAlgorithm reads all input from layerStream, uncompresses it using decompressor if necessary,
// and returns its digest using the specified algorithm.
func computeDiffIDWithAlgorithm(stream io.Reader, decompressor compressiontypes.DecompressorFunc, algorithm digest.Algorithm) (digest.Digest, error) {
if decompressor != nil {
s, err := decompressor(stream)
if err != nil {
@@ -987,7 +996,7 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
stream = s
}

return digest.Canonical.FromReader(stream)
return algorithm.FromReader(stream)
Review comment (Contributor):

This works locally, but there’s the cachedDiffID code path where we use an older value. That probably needs to be adjusted, if we want the behavior to be predictable for users.

(This is relevant for schema1 only, and that is nowadays entirely disabled in Docker and the distribution/distribution registry, at least by default. Arguably interoperable support for sha512+schema1 is never going to happen … but, for us, it might be easier to generate sha512+schema1 and let it fail, than to have an ~undocumented exception where we ignore the configuration, or to specifically hard-code an error path and make it absolutely impossible to use such a setup.)

}

// algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names
51 changes: 45 additions & 6 deletions image/copy/single_test.go
@@ -17,6 +17,7 @@ import (
"go.podman.io/image/v5/pkg/compression"
compressiontypes "go.podman.io/image/v5/pkg/compression/types"
"go.podman.io/image/v5/types"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)

func TestUpdatedBlobInfoFromReuse(t *testing.T) {
@@ -110,13 +111,24 @@ func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompre
}

func TestDiffIDComputationGoroutine(t *testing.T) {
// Test with SHA256 (default)
stream, err := os.Open("fixtures/Hello.uncompressed")
require.NoError(t, err)
res := goDiffIDComputationGoroutineWithTimeout(stream, nil)
require.NotNil(t, res)
assert.NoError(t, res.err)
assert.Equal(t, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969", res.digest.String())

// Test with SHA512 using the parametrized function
stream2, err := os.Open("fixtures/Hello.uncompressed")
require.NoError(t, err)
defer stream2.Close()

// Use the parametrized function directly instead of overriding global state
digest, err := computeDiffIDWithAlgorithm(stream2, nil, digest.SHA512)
Review comment (Contributor):

This is supposed to be a test of DiffIDComputationGoroutine
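
A hedged sketch of what this comment seems to ask for: exercise the goroutine itself with SHA512 instead of calling computeDiffIDWithAlgorithm directly. It would replace the stream2 block above, and it assumes that temporarily flipping the global algorithm via the Tmp* helpers is acceptable inside this test:

// Exercise diffIDComputationGoroutine with SHA512 by switching the global algorithm.
orig := supportedDigests.TmpDigestForNewObjects()
require.NoError(t, supportedDigests.TmpSetDigestForNewObjects(digest.SHA512))
defer func() { require.NoError(t, supportedDigests.TmpSetDigestForNewObjects(orig)) }()

stream512, err := os.Open("fixtures/Hello.uncompressed")
require.NoError(t, err)
res512 := goDiffIDComputationGoroutineWithTimeout(stream512, nil)
require.NotNil(t, res512)
assert.NoError(t, res512.err)
assert.Equal(t, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315", res512.digest.String())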

require.NoError(t, err)
assert.Equal(t, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315", digest.String())

// Error reading input
reader, writer := io.Pipe()
err = writer.CloseWithError(errors.New("Expected error reading input in diffIDComputationGoroutine"))
@@ -130,32 +142,59 @@ func TestComputeDiffID(t *testing.T) {
for _, c := range []struct {
filename string
decompressor compressiontypes.DecompressorFunc
algorithm digest.Algorithm
result digest.Digest
}{
{"fixtures/Hello.uncompressed", nil, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
{"fixtures/Hello.gz", nil, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"},
{"fixtures/Hello.gz", compression.GzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
{"fixtures/Hello.zst", nil, "sha256:361a8e0372ad438a0316eb39a290318364c10b60d0a7e55b40aa3eafafc55238"},
{"fixtures/Hello.zst", compression.ZstdDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
// SHA256 test cases (default)
{"fixtures/Hello.uncompressed", nil, digest.SHA256, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
{"fixtures/Hello.gz", nil, digest.SHA256, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"},
{"fixtures/Hello.gz", compression.GzipDecompressor, digest.SHA256, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
{"fixtures/Hello.zst", nil, digest.SHA256, "sha256:361a8e0372ad438a0316eb39a290318364c10b60d0a7e55b40aa3eafafc55238"},
{"fixtures/Hello.zst", compression.ZstdDecompressor, digest.SHA256, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"},
// SHA512 test cases
{"fixtures/Hello.uncompressed", nil, digest.SHA512, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315"},
{"fixtures/Hello.gz", nil, digest.SHA512, "sha512:8ee9be48dfc6274f65199847cd18ff4711f00329c5063b17cd128ba45ea1b9cea2479db0266cc1f4a3902874fdd7306f9c8a615347c0603b893fc75184fcb627"},
{"fixtures/Hello.gz", compression.GzipDecompressor, digest.SHA512, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315"},
{"fixtures/Hello.zst", nil, digest.SHA512, "sha512:e4ddd61689ce9d1cdd49e11dc8dc89ca064bdb09e85b9df56658560b8207647a78b95d04c3f5f2fb31abf13e1822f0d19307df18a3fdf88f58ef24a50e71a1ae"},
{"fixtures/Hello.zst", compression.ZstdDecompressor, digest.SHA512, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315"},
} {
stream, err := os.Open(c.filename)
require.NoError(t, err, c.filename)
defer stream.Close()

// Save original algorithm and set the desired one
Review comment (Contributor):

… and this test, which actually does test computeDiffID, was not updated to benefit from the new parametrized variant.
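
A minimal sketch of what that could look like — driving the table through the new parametrized variant instead of saving and restoring the global algorithm (not part of the diff; it assumes the table's new algorithm field added above):

stream, err := os.Open(c.filename)
require.NoError(t, err, c.filename)
defer stream.Close()

// No global state involved: pass the table's algorithm straight through.
diffID, err := computeDiffIDWithAlgorithm(stream, c.decompressor, c.algorithm)
require.NoError(t, err, c.filename)
assert.Equal(t, c.result, diffID)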

originalAlgorithm := supportedDigests.TmpDigestForNewObjects()
err = supportedDigests.TmpSetDigestForNewObjects(c.algorithm)
require.NoError(t, err)

// Test the digest computation directly without ImageDestination
diffID, err := computeDiffID(stream, c.decompressor)
require.NoError(t, err, c.filename)
assert.Equal(t, c.result, diffID)

// Restore the original algorithm
err = supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm)
require.NoError(t, err)
}

// Error initializing decompression
_, err := computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor)
originalAlgorithm := supportedDigests.TmpDigestForNewObjects()
err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA256)
require.NoError(t, err)
_, err = computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor)
assert.Error(t, err)
err = supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm)
require.NoError(t, err)

// Error reading input
reader, writer := io.Pipe()
defer reader.Close()
err = writer.CloseWithError(errors.New("Expected error reading input in computeDiffID"))
require.NoError(t, err)
err = supportedDigests.TmpSetDigestForNewObjects(digest.SHA256)
require.NoError(t, err)
_, err = computeDiffID(reader, nil)
assert.Error(t, err)
err = supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm)
require.NoError(t, err)
}
2 changes: 1 addition & 1 deletion image/directory/directory_dest.go
@@ -151,7 +151,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
}
}()

digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo)
Review comment (Contributor):

Locally, fine, that fulfills the PutBlobWithOptions contract…

… and then copyBlobFromStream might fail on if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest, and worse, copyLayers will see layerDigestsDiffer and run into cannotModifyManifestReason. And then we are going to talk about “user would prefer to use this digest” vs. “user requires to use this digest, and if we can’t, we must fail”… I’m not sure what TmpDigestForNewObjects is but I suspect the former, and I’m also not sure whether we will also need the latter.

Either way, the decision of an algorithm to use in copies, when writing to the destination must be managed more precisely, and the copy code will probably want to be in control of the trade-offs.

I think that implies it must be basically caller-decided, somewhere in the copy code (and PutBlob* still needs to do something for external callers — perhaps default to sha256 for API compatibility, perhaps really use the configured value.)
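
Purely as an illustration of "caller-decided", and nothing more: the copy pipeline could pass its choice down through the PutBlob options while destinations default to the canonical algorithm. Every identifier below is hypothetical; none of it exists in this PR or in the library:

// Hypothetical sketch only — illustrates "default to sha256 for API compatibility,
// or use the configured value when the copy code asks for it".
algo := options.DigestAlgorithm // hypothetical option field, set only by the copy code
if algo == "" {
	algo = digest.Canonical // preserve today's behavior for external PutBlob* callers
}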

// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
2 changes: 1 addition & 1 deletion image/docker/docker_image_dest.go
@@ -178,7 +178,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
}

digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo)
sizeCounter := &sizeCounter{}
stream = io.TeeReader(stream, sizeCounter)

10 changes: 9 additions & 1 deletion image/internal/image/docker_schema1.go
@@ -9,6 +9,7 @@ import (
"go.podman.io/image/v5/docker/reference"
"go.podman.io/image/v5/manifest"
"go.podman.io/image/v5/types"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)

type manifestSchema1 struct {
@@ -160,6 +161,13 @@ func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, o
//
// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
// Explicitly reject SHA512+Schema1 combinations as they are not supported
// Schema1 is deprecated and Docker/registry don't support SHA512+Schema1
Review comment (Contributor):

Schema1 is deprecated and we don’t expect it to work with sha512? Fine.

Then we don’t touch the code that converts to schema1, but we do modify the code that converts from schema1 to _non_schema1 to reject sha512? That’s exactly the wrong way around.

(And then the code individually rejects sha512, when it should reject all other non-sha256 algorithms instead….)
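
For what it's worth, a sketch of the check this comment suggests instead — rejecting every non-SHA256 algorithm rather than singling out SHA512 (and the comment argues it belongs in the convert-to-schema1 path, not here). The wording is illustrative only, not from the PR:

if algo := supportedDigests.TmpDigestForNewObjects(); algo != digest.SHA256 {
	return nil, fmt.Errorf("schema1 manifests only support SHA256 digests, not %q", algo)
}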

configuredAlgorithm := supportedDigests.TmpDigestForNewObjects()
if configuredAlgorithm == digest.SHA512 {
return nil, fmt.Errorf("SHA512+Schema1 is not supported: Schema1 is deprecated and Docker/registry do not support SHA512 with Schema1 manifests. Please use SHA256 or convert to Schema2/OCI format")
}

uploadedLayerInfos := options.InformationOnly.LayerInfos
layerDiffIDs := options.InformationOnly.LayerDiffIDs

Expand Down Expand Up @@ -219,7 +227,7 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t
configDescriptor := manifest.Schema2Descriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
Size: int64(len(configJSON)),
Digest: digest.FromBytes(configJSON),
Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(configJSON),
}

if options.LayerInfos != nil {
36 changes: 36 additions & 0 deletions image/internal/image/docker_schema1_test.go
@@ -15,6 +15,7 @@ import (
"go.podman.io/image/v5/docker/reference"
"go.podman.io/image/v5/manifest"
"go.podman.io/image/v5/types"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)

var schema1FixtureLayerInfos = []types.BlobInfo{
@@ -720,3 +721,38 @@ func TestManifestSchema1CanChangeLayerCompression(t *testing.T) {
assert.True(t, m.CanChangeLayerCompression(""))
}
}

// TestSchema1SHA512Rejection tests that SHA512+Schema1 combinations are explicitly rejected
func TestSchema1SHA512Rejection(t *testing.T) {
// Save original algorithm and restore it after the test
originalAlgorithm := supportedDigests.TmpDigestForNewObjects()
defer func() {
err := supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm)
require.NoError(t, err)
}()

// Set SHA512 algorithm
err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA512)
require.NoError(t, err)

// Create a schema1 manifest
manifestBlob, err := os.ReadFile(filepath.Join("fixtures", "schema1.json"))
require.NoError(t, err)

m, err := manifestSchema1FromManifest(manifestBlob)
require.NoError(t, err)

// Try to convert to schema2 with SHA512 - this should fail
schema1Manifest := m.(*manifestSchema1)
_, err = schema1Manifest.convertToManifestSchema2(context.Background(), &types.ManifestUpdateOptions{
InformationOnly: types.ManifestUpdateInformation{
LayerInfos: schema1FixtureLayerInfos,
},
})

// Should get an error about SHA512+Schema1 not being supported
require.Error(t, err)
assert.Contains(t, err.Error(), "SHA512+Schema1 is not supported")
assert.Contains(t, err.Error(), "Schema1 is deprecated")
assert.Contains(t, err.Error(), "Please use SHA256 or convert to Schema2/OCI format")
}
9 changes: 6 additions & 3 deletions image/internal/image/docker_schema2.go
@@ -17,6 +17,7 @@ import (
"go.podman.io/image/v5/internal/iolimits"
"go.podman.io/image/v5/manifest"
"go.podman.io/image/v5/pkg/blobinfocache/none"
"go.podman.io/image/v5/pkg/digestvalidation"
"go.podman.io/image/v5/types"
)

@@ -110,9 +111,11 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.ConfigDescriptor.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
expectedDigest := m.m.ConfigDescriptor.Digest
Review comment (Contributor):

(I’m not sure that we need an extra variable here.)


// Validate the blob against the expected digest using centralized validation
Review comment (Contributor):

Yet again, not worth commenting.

if err := digestvalidation.ValidateBlobAgainstDigest(blob, expectedDigest); err != nil {
return nil, fmt.Errorf("config descriptor validation failed: %w", err)
}
m.configBlob = blob
}
10 changes: 6 additions & 4 deletions image/internal/image/oci.go
@@ -8,7 +8,6 @@ import (
"slices"

ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"go.podman.io/image/v5/docker/reference"
"go.podman.io/image/v5/internal/iolimits"
@@ -74,9 +73,12 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.Config.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
// Use the same algorithm as the expected digest
expectedDigest := m.m.Config.Digest
algorithm := expectedDigest.Algorithm()
computedDigest := algorithm.FromBytes(blob)
if computedDigest != expectedDigest {
Review comment (Contributor):

So the schema2 code got the nice new ValidateBlobAgainstDigest … and this ~equivalent does not use it.

return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, expectedDigest)
}
m.configBlob = blob
}
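
A sketch of the OCI path reusing the same helper as the schema2 change above, as the comment suggests (it assumes digestvalidation.ValidateBlobAgainstDigest has the signature shown in docker_schema2.go; not part of the diff):

if err := digestvalidation.ValidateBlobAgainstDigest(blob, m.m.Config.Digest); err != nil {
	return nil, fmt.Errorf("config descriptor validation failed: %w", err)
}
m.configBlob = blob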
3 changes: 2 additions & 1 deletion image/internal/manifest/manifest.go
@@ -8,6 +8,7 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
compressiontypes "go.podman.io/image/v5/pkg/compression/types"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)

// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
@@ -123,7 +124,7 @@ func Digest(manifest []byte) (digest.Digest, error) {
}
}

return digest.FromBytes(manifest), nil
return supportedDigests.TmpDigestForNewObjects().FromBytes(manifest), nil
Review comment (Contributor):

I don’t know that we can just change what manifest.Digest outputs; I’m leaning towards “no”. Compare #375 (comment).

}

// MatchesDigest returns true iff the manifest matches expectedDigest.
72 changes: 72 additions & 0 deletions image/internal/manifest/manifest_test.go
@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.podman.io/image/v5/pkg/compression"
supportedDigests "go.podman.io/storage/pkg/supported-digests"
)

const (
@@ -87,6 +88,9 @@ func TestMatchesDigest(t *testing.T) {
{"v2s1.manifest.json", TestDockerV2S2ManifestDigest, false},
// Unrecognized algorithm
{"v2s2.manifest.json", digest.Digest("md5:2872f31c5c1f62a694fbd20c1e85257c"), false},
// SHA512 test cases (these should fail because we're using SHA256 by default)
Review comment (Contributor):

No, there’s no reason why MatchesDigest shouldn’t be able to validate against sha512. It doesn’t, but that can be fixed.

{"v2s2.manifest.json", digest.SHA512.FromBytes([]byte("test")), false},
{"v2s1.manifest.json", digest.SHA512.FromBytes([]byte("test")), false},
Review comment (Contributor):

This is not much of a test case… and MatchesDigest is actually an important function that needs updating to support sha512.

// Mangled format
{"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String() + "abc"), false},
{"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String()[:20]), false},
@@ -112,6 +116,74 @@
assert.NoError(t, err)
}
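
A rough sketch of how MatchesDigest could honor whatever algorithm the expected digest carries, as the comments above suggest. The real function also strips schema1 signatures before hashing, which is elided here, and the function name below is mine, not the library's:

func matchesDigestSketch(payload []byte, expectedDigest digest.Digest) bool {
	algo := expectedDigest.Algorithm()
	if !algo.Available() {
		return false // unrecognized or mangled digests simply do not match
	}
	// Hash with the algorithm the caller asked about, not a globally configured one.
	return algo.FromBytes(payload) == expectedDigest
}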

func TestMatchesDigestWithSHA512(t *testing.T) {
Review comment (Contributor):

Nah. It doesn’t work for a caller with a single sha512 digest to change a process-wide global (racing against other goroutines), and it’s entirely unnecessary.

… and, after that is fixed, this should be tested just by a few more table entries in the existing test.

// Save original algorithm and restore it after the test
originalAlgorithm := supportedDigests.TmpDigestForNewObjects()
defer func() {
err := supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm)
require.NoError(t, err)
}()

// Set SHA512 algorithm
err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA512)
require.NoError(t, err)

cases := []struct {
path string
expectedDigest digest.Digest
result bool
}{
// Success cases with SHA512
{"v2s2.manifest.json", digest.SHA512.FromBytes([]byte("test")), false}, // Wrong data
{"v2s1.manifest.json", digest.SHA512.FromBytes([]byte("test")), false}, // Wrong data
// Empty manifest
{"", digest.SHA512.FromBytes([]byte{}), true},
// Wrong algorithm (SHA256 when SHA512 is configured)
{"v2s2.manifest.json", TestDockerV2S2ManifestDigest, false},
{"v2s1.manifest.json", TestDockerV2S1ManifestDigest, false},
// Mangled format
{"v2s2.manifest.json", digest.Digest("sha512:invalid"), false},
{"v2s2.manifest.json", digest.Digest(""), false},
}

for _, c := range cases {
var manifest []byte
var err error

if c.path == "" {
// Empty manifest case
manifest = []byte{}
} else {
manifest, err = os.ReadFile(filepath.Join("testdata", c.path))
require.NoError(t, err)
}

// For success cases, compute the correct SHA512 digest
if c.result {
c.expectedDigest = digest.SHA512.FromBytes(manifest)
}

res, err := MatchesDigest(manifest, c.expectedDigest)
require.NoError(t, err)
assert.Equal(t, c.result, res, "path=%s, expectedDigest=%s", c.path, c.expectedDigest)
}

// Test with actual manifest files and their correct SHA512 digests
manifestFiles := []string{"v2s2.manifest.json", "v2s1.manifest.json"}
for _, file := range manifestFiles {
manifest, err := os.ReadFile(filepath.Join("testdata", file))
require.NoError(t, err)

// Use the Digest function to get the correct digest (which handles signature stripping)
expectedDigest, err := Digest(manifest)
require.NoError(t, err)

res, err := MatchesDigest(manifest, expectedDigest)
require.NoError(t, err)
assert.True(t, res, "MatchesDigest should work with SHA512 for %s", file)
}
}

func TestNormalizedMIMEType(t *testing.T) {
for _, c := range []string{ // Valid MIME types, normalized to themselves
DockerV2Schema1MediaType,