diff --git a/sdk/containers/azcontainerregistry/assets.json b/sdk/containers/azcontainerregistry/assets.json index cb7b42790499..6ee1577aeeb2 100644 --- a/sdk/containers/azcontainerregistry/assets.json +++ b/sdk/containers/azcontainerregistry/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/containers/azcontainerregistry", - "Tag": "go/containers/azcontainerregistry_5bce238ccf" + "Tag": "go/containers/azcontainerregistry_9579d04096" } diff --git a/sdk/containers/azcontainerregistry/blob_client_test.go b/sdk/containers/azcontainerregistry/blob_client_test.go index 5c6bb3572cdc..635a446993b5 100644 --- a/sdk/containers/azcontainerregistry/blob_client_test.go +++ b/sdk/containers/azcontainerregistry/blob_client_test.go @@ -9,13 +9,21 @@ package azcontainerregistry import ( "bytes" "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/mock" "github.com/stretchr/testify/require" "io" + "net/http" + "strconv" + "strings" "testing" ) +const alpineBlobDigest = "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" + func TestBlobClient_CancelUpload(t *testing.T) { startRecording(t) endpoint, cred, options := getEndpointCredAndClientOptions(t) @@ -44,10 +52,9 @@ func TestBlobClient_CheckBlobExists(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - res, err := client.CheckBlobExists(ctx, "alpine", digest, nil) + res, err := client.CheckBlobExists(ctx, "alpine", alpineBlobDigest, nil) require.NoError(t, err) - require.Equal(t, digest, *res.DockerContentDigest) + require.Equal(t, alpineBlobDigest, *res.DockerContentDigest) } 
func TestBlobClient_CheckBlobExists_fail(t *testing.T) { @@ -76,8 +83,7 @@ func TestBlobClient_CheckChunkExists(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - res, err := client.CheckChunkExists(ctx, "alpine", digest, "bytes=0-299", nil) + res, err := client.CheckChunkExists(ctx, "alpine", alpineBlobDigest, "bytes=0-299", nil) require.NoError(t, err) require.NotEmpty(t, *res.ContentLength) } @@ -108,8 +114,7 @@ func TestBlobClient_completeUpload_wrongDigest(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - getRes, err := client.GetBlob(ctx, "alpine", digest, nil) + getRes, err := client.GetBlob(ctx, "alpine", alpineBlobDigest, nil) require.NoError(t, err) blob, err := io.ReadAll(getRes.BlobData) require.NoError(t, err) @@ -127,8 +132,7 @@ func TestBlobClient_DeleteBlob(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - _, err = client.DeleteBlob(ctx, "alpine", digest, nil) + _, err = client.DeleteBlob(ctx, "alpine", alpineBlobDigest, nil) require.NoError(t, err) } @@ -158,10 +162,32 @@ func TestBlobClient_GetBlob(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - res, err := client.GetBlob(ctx, "alpine", digest, nil) + res, err := client.GetBlob(ctx, "alpine", alpineBlobDigest, nil) 
require.NoError(t, err) require.NotEmpty(t, *res.ContentLength) + reader, err := NewDigestValidationReader(alpineBlobDigest, res.BlobData) + require.NoError(t, err) + _, err = io.ReadAll(reader) + require.NoError(t, err) +} + +func TestBlobClient_GetBlob_wrongDigest(t *testing.T) { + srv, closeServer := mock.NewServer() + defer closeServer() + srv.AppendResponse(mock.WithStatusCode(http.StatusOK), mock.WithBody([]byte("test"))) + + pl := runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &policy.ClientOptions{Transport: srv}) + client := &BlobClient{ + srv.URL(), + pl, + } + ctx := context.Background() + resp, err := client.GetBlob(ctx, "name", "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", nil) + require.NoError(t, err) + reader, err := NewDigestValidationReader("sha256:wrong", resp.BlobData) + require.NoError(t, err) + _, err = io.ReadAll(reader) + require.Error(t, err, ErrMismatchedHash) } func TestBlobClient_GetBlob_fail(t *testing.T) { @@ -190,10 +216,27 @@ func TestBlobClient_GetChunk(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - res, err := client.GetChunk(ctx, "alpine", digest, "bytes=0-999", nil) + chunkSize := 1000 + current := 0 + blob := bytes.NewBuffer(nil) + for { + res, err := client.GetChunk(ctx, "alpine", alpineBlobDigest, fmt.Sprintf("bytes=%d-%d", current, current+chunkSize-1), nil) + require.NoError(t, err) + chunk, err := io.ReadAll(res.ChunkData) + require.NoError(t, err) + _, err = blob.Write(chunk) + require.NoError(t, err) + totalSize, _ := strconv.Atoi(strings.Split(*res.ContentRange, "/")[1]) + currentRangeEnd, _ := strconv.Atoi(strings.Split(strings.Split(*res.ContentRange, "/")[0], "-")[1]) + if totalSize == currentRangeEnd+1 { + break + } + current += chunkSize + } + reader, err := 
NewDigestValidationReader(alpineBlobDigest, blob) + require.NoError(t, err) + _, err = io.ReadAll(reader) require.NoError(t, err) - require.Equal(t, int64(1000), *res.ContentLength) } func TestBlobClient_GetChunk_fail(t *testing.T) { @@ -247,8 +290,7 @@ func TestBlobClient_MountBlob(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - res, err := client.MountBlob(ctx, "hello-world", "alpine", digest, nil) + res, err := client.MountBlob(ctx, "hello-world", "alpine", alpineBlobDigest, nil) require.NoError(t, err) require.NotEmpty(t, res.Location) } diff --git a/sdk/containers/azcontainerregistry/blob_custom_client.go b/sdk/containers/azcontainerregistry/blob_custom_client.go index d8b04d0857d5..f7ce95d65c09 100644 --- a/sdk/containers/azcontainerregistry/blob_custom_client.go +++ b/sdk/containers/azcontainerregistry/blob_custom_client.go @@ -8,8 +8,6 @@ package azcontainerregistry import ( "context" - "crypto/sha256" - "encoding" "errors" "fmt" "github.com/Azure/azure-sdk-for-go/sdk/azcore" @@ -17,7 +15,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "hash" "io" "reflect" ) @@ -61,59 +58,6 @@ func NewBlobClient(endpoint string, credential azcore.TokenCredential, options * }, nil } -// BlobDigestCalculator help to calculate blob digest when uploading blob. -// Don't use this type directly, use NewBlobDigestCalculator() instead. -type BlobDigestCalculator struct { - h hash.Hash - hashState []byte -} - -type wrappedReadSeeker struct { - io.Reader - io.Seeker -} - -// NewBlobDigestCalculator creates a new calculator to help to calculate blob digest when uploading blob. 
-func NewBlobDigestCalculator() *BlobDigestCalculator { - return &BlobDigestCalculator{ - h: sha256.New(), - } -} - -func (b *BlobDigestCalculator) saveState() { - b.hashState, _ = b.h.(encoding.BinaryMarshaler).MarshalBinary() -} - -func (b *BlobDigestCalculator) restoreState() { - if b.hashState == nil { - return - } - _ = b.h.(encoding.BinaryUnmarshaler).UnmarshalBinary(b.hashState) -} - -// newLimitTeeReader returns a Reader that writes to w what it reads from r with n bytes limit. -func newLimitTeeReader(r io.Reader, w io.Writer, n int64) io.Reader { - return &limitTeeReader{r, w, n} -} - -type limitTeeReader struct { - r io.Reader - w io.Writer - n int64 -} - -func (lt *limitTeeReader) Read(p []byte) (int, error) { - n, err := lt.r.Read(p) - if n > 0 && lt.n > 0 { - wn, werr := lt.w.Write(p[:n]) - if werr != nil { - return wn, werr - } - lt.n -= int64(wn) - } - return n, err -} - // BlobClientUploadChunkOptions contains the optional parameters for the BlobClient.UploadChunk method. type BlobClientUploadChunkOptions struct { // Start of range for the blob to be uploaded. @@ -130,15 +74,11 @@ type BlobClientUploadChunkOptions struct { // - options - BlobClientUploadChunkOptions contains the optional parameters for the BlobClient.UploadChunk method. 
func (client *BlobClient) UploadChunk(ctx context.Context, location string, chunkData io.ReadSeeker, blobDigestCalculator *BlobDigestCalculator, options *BlobClientUploadChunkOptions) (BlobClientUploadChunkResponse, error) { blobDigestCalculator.saveState() - size, err := chunkData.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size - if err != nil { - return BlobClientUploadChunkResponse{}, err - } - _, err = chunkData.Seek(0, io.SeekStart) + reader, err := blobDigestCalculator.wrapReader(chunkData) if err != nil { return BlobClientUploadChunkResponse{}, err } - wrappedChunkData := &wrappedReadSeeker{Reader: newLimitTeeReader(chunkData, blobDigestCalculator.h, size), Seeker: chunkData} + wrappedChunkData := &wrappedReadSeeker{Reader: reader, Seeker: chunkData} var requestOptions *blobClientUploadChunkOptions if options != nil && options.RangeStart != nil && options.RangeEnd != nil { requestOptions = &blobClientUploadChunkOptions{ContentRange: to.Ptr(fmt.Sprintf("%d-%d", *options.RangeStart, *options.RangeEnd))} @@ -157,5 +97,5 @@ func (client *BlobClient) UploadChunk(ctx context.Context, location string, chun // - blobDigestCalculator - Calculator that help to calculate blob digest // - options - BlobClientCompleteUploadOptions contains the optional parameters for the BlobClient.CompleteUpload method. 
func (client *BlobClient) CompleteUpload(ctx context.Context, location string, blobDigestCalculator *BlobDigestCalculator, options *BlobClientCompleteUploadOptions) (BlobClientCompleteUploadResponse, error) { - return client.completeUpload(ctx, fmt.Sprintf("sha256:%x", blobDigestCalculator.h.Sum(nil)), location, options) + return client.completeUpload(ctx, blobDigestCalculator.getDigest(), location, options) } diff --git a/sdk/containers/azcontainerregistry/blob_custom_client_test.go b/sdk/containers/azcontainerregistry/blob_custom_client_test.go index 2023ccb2ec94..a1721077c132 100644 --- a/sdk/containers/azcontainerregistry/blob_custom_client_test.go +++ b/sdk/containers/azcontainerregistry/blob_custom_client_test.go @@ -28,8 +28,7 @@ func TestBlobClient_CompleteUpload(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - getRes, err := client.GetBlob(ctx, "alpine", digest, nil) + getRes, err := client.GetBlob(ctx, "alpine", alpineBlobDigest, nil) require.NoError(t, err) blob, err := io.ReadAll(getRes.BlobData) require.NoError(t, err) @@ -49,8 +48,7 @@ func TestBlobClient_UploadChunk(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - getRes, err := client.GetBlob(ctx, "alpine", digest, nil) + getRes, err := client.GetBlob(ctx, "alpine", alpineBlobDigest, nil) require.NoError(t, err) blob, err := io.ReadAll(getRes.BlobData) require.NoError(t, err) @@ -70,8 +68,7 @@ func TestBlobClient_CompleteUpload_uploadByChunk(t *testing.T) { ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := 
"sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - getRes, err := client.GetBlob(ctx, "alpine", digest, nil) + getRes, err := client.GetBlob(ctx, "alpine", alpineBlobDigest, nil) require.NoError(t, err) blob, err := io.ReadAll(getRes.BlobData) require.NoError(t, err) @@ -79,15 +76,26 @@ func TestBlobClient_CompleteUpload_uploadByChunk(t *testing.T) { require.NoError(t, err) calculator := NewBlobDigestCalculator() oriReader := bytes.NewReader(blob) - firstPart := io.NewSectionReader(oriReader, int64(0), int64(len(blob)/2)) - secondPart := io.NewSectionReader(oriReader, int64(len(blob)/2), int64(len(blob)-len(blob)/2)) - uploadResp, err := client.UploadChunk(ctx, *startRes.Location, firstPart, calculator, &BlobClientUploadChunkOptions{RangeStart: to.Ptr(int32(0)), RangeEnd: to.Ptr(int32(len(blob)/2 - 1))}) - require.NoError(t, err) - require.NotEmpty(t, *uploadResp.Location) - uploadResp, err = client.UploadChunk(ctx, *uploadResp.Location, secondPart, calculator, &BlobClientUploadChunkOptions{RangeStart: to.Ptr(int32(len(blob) / 2)), RangeEnd: to.Ptr(int32(len(blob) - 1))}) - require.NoError(t, err) - require.NotEmpty(t, *uploadResp.Location) - completeResp, err := client.CompleteUpload(ctx, *uploadResp.Location, calculator, nil) + size := int64(len(blob)) + chunkSize := int64(736) + current := int64(0) + location := *startRes.Location + for { + end := current + chunkSize + if end > size { + end = size + } + chunkReader := io.NewSectionReader(oriReader, current, end-current) + uploadResp, err := client.UploadChunk(ctx, location, chunkReader, calculator, &BlobClientUploadChunkOptions{RangeStart: to.Ptr(int32(current)), RangeEnd: to.Ptr(int32(end - 1))}) + require.NoError(t, err) + require.NotEmpty(t, *uploadResp.Location) + location = *uploadResp.Location + current = end + if current >= size { + break + } + } + completeResp, err := client.CompleteUpload(ctx, location, calculator, nil) require.NoError(t, err) require.NotEmpty(t, 
*completeResp.DockerContentDigest) } @@ -103,28 +111,13 @@ func TestNewBlobClient(t *testing.T) { require.Errorf(t, err, "provided Cloud field is missing Azure Container Registry configuration") } -func TestBlobDigestCalculator_saveAndRestoreState(t *testing.T) { - calculator := NewBlobDigestCalculator() - calculator.restoreState() - calculator.saveState() - calculator.restoreState() - calculator.h.Write([]byte("test1")) - sum := calculator.h.Sum(nil) - calculator.saveState() - calculator.h.Write([]byte("test2")) - require.NotEqual(t, sum, calculator.h.Sum(nil)) - calculator.restoreState() - require.Equal(t, sum, calculator.h.Sum(nil)) -} - func TestBlobClient_CompleteUpload_uploadByChunkFailOver(t *testing.T) { startRecording(t) endpoint, cred, options := getEndpointCredAndClientOptions(t) ctx := context.Background() client, err := NewBlobClient(endpoint, cred, &BlobClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:042a816809aac8d0f7d7cacac7965782ee2ecac3f21bcf9f24b1de1a7387b769" - getRes, err := client.GetBlob(ctx, "alpine", digest, nil) + getRes, err := client.GetBlob(ctx, "alpine", alpineBlobDigest, nil) require.NoError(t, err) blob, err := io.ReadAll(getRes.BlobData) require.NoError(t, err) diff --git a/sdk/containers/azcontainerregistry/client_test.go b/sdk/containers/azcontainerregistry/client_test.go index ca3041ddc93a..b43fe8345733 100644 --- a/sdk/containers/azcontainerregistry/client_test.go +++ b/sdk/containers/azcontainerregistry/client_test.go @@ -21,6 +21,8 @@ import ( "testing" ) +const alpineManifestDigest = "sha256:f271e74b17ced29b915d351685fd4644785c6d1559dd1f2d4189a5e851ef753a" + func TestClient_DeleteManifest(t *testing.T) { startRecording(t) endpoint, cred, options := getEndpointCredAndClientOptions(t) @@ -131,12 +133,33 @@ func TestClient_GetManifest(t *testing.T) { require.NoError(t, err) res, err := client.GetManifest(ctx, "alpine", "3.17.1", &ClientGetManifestOptions{Accept: 
to.Ptr("application/vnd.docker.distribution.manifest.v2+json")}) require.NoError(t, err) - manifest, err := io.ReadAll(res.ManifestData) + reader, err := NewDigestValidationReader(*res.DockerContentDigest, res.ManifestData) + require.NoError(t, err) + manifest, err := io.ReadAll(reader) require.NoError(t, err) require.NotEmpty(t, manifest) fmt.Printf("manifest content: %s\n", manifest) } +func TestClient_GetManifest_wrongServerDigest(t *testing.T) { + srv, closeServer := mock.NewServer() + defer closeServer() + srv.AppendResponse(mock.WithStatusCode(http.StatusOK), mock.WithBody([]byte("test")), mock.WithHeader("Docker-Content-Digest", "sha256:wrong")) + + pl := runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &policy.ClientOptions{Transport: srv}) + client := &Client{ + srv.URL(), + pl, + } + ctx := context.Background() + resp, err := client.GetManifest(ctx, "name", "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", nil) + require.NoError(t, err) + reader, err := NewDigestValidationReader(*resp.DockerContentDigest, resp.ManifestData) + require.NoError(t, err) + _, err = io.ReadAll(reader) + require.Error(t, err, ErrMismatchedHash) +} + func TestClient_GetManifest_empty(t *testing.T) { ctx := context.Background() client, err := NewClient("endpoint", nil, nil) @@ -163,16 +186,14 @@ func TestClient_GetManifestProperties(t *testing.T) { ctx := context.Background() client, err := NewClient(endpoint, cred, &ClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:f271e74b17ced29b915d351685fd4644785c6d1559dd1f2d4189a5e851ef753a" - tag := "3.17.1" - digestRes, err := client.GetManifestProperties(ctx, "alpine", digest, nil) + digestRes, err := client.GetManifestProperties(ctx, "alpine", alpineManifestDigest, nil) require.NoError(t, err) - require.Equal(t, *digestRes.Manifest.Digest, digest) - resp, err := client.GetTagProperties(ctx, "alpine", tag, nil) + require.Equal(t, *digestRes.Manifest.Digest, 
alpineManifestDigest) + resp, err := client.GetTagProperties(ctx, "alpine", "3.17.1", nil) require.NoError(t, err) tagRes, err := client.GetManifestProperties(ctx, "alpine", *resp.Tag.Digest, nil) require.NoError(t, err) - require.Equal(t, digest, *tagRes.Manifest.Digest) + require.Equal(t, alpineManifestDigest, *tagRes.Manifest.Digest) } func TestClient_GetManifestProperties_empty(t *testing.T) { @@ -476,9 +497,7 @@ func TestClient_UpdateManifestProperties(t *testing.T) { ctx := context.Background() client, err := NewClient(endpoint, cred, &ClientOptions{ClientOptions: options}) require.NoError(t, err) - digest := "sha256:f271e74b17ced29b915d351685fd4644785c6d1559dd1f2d4189a5e851ef753a" - tag := "3.17.1" - resp, err := client.GetTagProperties(ctx, "alpine", tag, nil) + resp, err := client.GetTagProperties(ctx, "alpine", "3.17.1", nil) require.NoError(t, err) res, err := client.UpdateManifestProperties(ctx, "alpine", *resp.Tag.Digest, &ClientUpdateManifestPropertiesOptions{Value: &ManifestWriteableProperties{ CanWrite: to.Ptr(false), @@ -486,7 +505,7 @@ func TestClient_UpdateManifestProperties(t *testing.T) { }) require.NoError(t, err) require.False(t, *res.Manifest.ChangeableAttributes.CanWrite) - res, err = client.UpdateManifestProperties(ctx, "alpine", digest, &ClientUpdateManifestPropertiesOptions{Value: &ManifestWriteableProperties{ + res, err = client.UpdateManifestProperties(ctx, "alpine", alpineManifestDigest, &ClientUpdateManifestPropertiesOptions{Value: &ManifestWriteableProperties{ CanWrite: to.Ptr(true), }, }) @@ -608,10 +627,17 @@ func TestClient_UploadManifest(t *testing.T) { require.NoError(t, err) manifest, err := io.ReadAll(getRes.ManifestData) require.NoError(t, err) - uploadRes, err := client.UploadManifest(ctx, "hello-world", "test", "application/vnd.docker.distribution.manifest.v2+json", streaming.NopCloser(bytes.NewReader(manifest)), nil) + reader := bytes.NewReader(manifest) + uploadRes, err := client.UploadManifest(ctx, "hello-world", 
"test", "application/vnd.docker.distribution.manifest.v2+json", streaming.NopCloser(reader), nil) require.NoError(t, err) require.NotEmpty(t, *uploadRes.DockerContentDigest) fmt.Printf("uploaded manifest digest: %s\n", *uploadRes.DockerContentDigest) + _, err = reader.Seek(0, io.SeekStart) + require.NoError(t, err) + validateReader, err := NewDigestValidationReader(*uploadRes.DockerContentDigest, reader) + require.NoError(t, err) + _, err = io.ReadAll(validateReader) + require.NoError(t, err) } func TestClient_UploadManifest_empty(t *testing.T) { diff --git a/sdk/containers/azcontainerregistry/digest_helper.go b/sdk/containers/azcontainerregistry/digest_helper.go new file mode 100644 index 000000000000..b7c01d3fe428 --- /dev/null +++ b/sdk/containers/azcontainerregistry/digest_helper.go @@ -0,0 +1,160 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package azcontainerregistry + +import ( + "crypto/sha256" + "encoding" + "errors" + "fmt" + "hash" + "io" + "strings" +) + +var ( + validatorCtors = map[string]func() digestValidator{"sha256": newSha256Validator} + ErrMismatchedHash = errors.New("mismatched hash") + ErrDigestAlgNotSupported = errors.New("digest algorithm not supported") +) + +type digestValidator interface { + io.Writer + validate(digest string) error +} + +func parseDigestValidator(digest string) (digestValidator, error) { + alg := digest[:strings.Index(digest, ":")] + if v, ok := validatorCtors[alg]; ok { + return v(), nil + } else { + return nil, ErrDigestAlgNotSupported + } +} + +type sha256Validator struct { + hash.Hash +} + +func newSha256Validator() digestValidator { + return &sha256Validator{sha256.New()} +} + +func (s *sha256Validator) validate(digest string) error { + if fmt.Sprintf("sha256:%x", s.Sum(nil)) != digest { + return ErrMismatchedHash + } + return nil +} + +// DigestValidationReader help to validate digest when fetching manifest or blob. +// Don't use this type directly, use NewDigestValidationReader() instead. +type DigestValidationReader struct { + digest string + digestValidator digestValidator + reader io.Reader +} + +// NewDigestValidationReader creates a new reader that help you to validate digest when you read manifest or blob data. +func NewDigestValidationReader(digest string, reader io.Reader) (*DigestValidationReader, error) { + validator, err := parseDigestValidator(digest) + if err == nil { + return &DigestValidationReader{ + digest: digest, + digestValidator: validator, + reader: reader, + }, nil + } else { + return nil, err + } +} + +// Read write to digest validator while read and validate digest when reach EOF. 
+func (d *DigestValidationReader) Read(p []byte) (int, error) { + n, err := d.reader.Read(p) + if err == nil || err == io.EOF { + wn, werr := d.digestValidator.Write(p[:n]) + if werr != nil { + return wn, werr + } + } + if err == io.EOF { + if err := d.digestValidator.validate(d.digest); err != nil { + return n, err + } + } + return n, err +} + +// BlobDigestCalculator help to calculate blob digest when uploading blob. +// Don't use this type directly, use NewBlobDigestCalculator() instead. +type BlobDigestCalculator struct { + h hash.Hash + state []byte +} + +// NewBlobDigestCalculator creates a new calculator to help to calculate blob digest when uploading blob. +func NewBlobDigestCalculator() *BlobDigestCalculator { + return &BlobDigestCalculator{ + h: sha256.New(), + } +} + +func (b *BlobDigestCalculator) saveState() { + b.state, _ = b.h.(encoding.BinaryMarshaler).MarshalBinary() +} + +func (b *BlobDigestCalculator) restoreState() { + if b.state == nil { + return + } + _ = b.h.(encoding.BinaryUnmarshaler).UnmarshalBinary(b.state) +} + +func (b *BlobDigestCalculator) getDigest() string { + return fmt.Sprintf("sha256:%x", b.h.Sum(nil)) +} + +func (b *BlobDigestCalculator) wrapReader(reader io.ReadSeeker) (io.Reader, error) { + size, err := reader.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return nil, err + } + _, err = reader.Seek(0, io.SeekStart) + if err != nil { + return nil, err + } + return newLimitTeeReader(reader, b.h, size), nil +} + +type wrappedReadSeeker struct { + io.Reader + io.Seeker +} + +// newLimitTeeReader returns a Reader that writes to w what it reads from r with n bytes limit. 
+func newLimitTeeReader(r io.Reader, w io.Writer, n int64) io.Reader { + return &limitTeeReader{r, w, n} +} + +type limitTeeReader struct { + r io.Reader + w io.Writer + n int64 +} + +func (lt *limitTeeReader) Read(p []byte) (int, error) { + n, err := lt.r.Read(p) + if n > 0 && lt.n > 0 { + wn, werr := lt.w.Write(p[:n]) + if werr != nil { + return wn, werr + } + lt.n -= int64(wn) + } + return n, err +} diff --git a/sdk/containers/azcontainerregistry/digest_helper_test.go b/sdk/containers/azcontainerregistry/digest_helper_test.go new file mode 100644 index 000000000000..57990b63b498 --- /dev/null +++ b/sdk/containers/azcontainerregistry/digest_helper_test.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azcontainerregistry + +import ( + "github.com/stretchr/testify/require" + "reflect" + "testing" +) + +func Test_parseDigestValidator(t *testing.T) { + tests := []struct { + name string + digest string + want digestValidator + wantErr error + }{ + {"sha256", "sha256:test", newSha256Validator(), nil}, + {"not supported", "sha512:test", nil, ErrDigestAlgNotSupported}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseDigestValidator(tt.digest) + if err != nil || tt.wantErr != nil { + if err != tt.wantErr { + t.Errorf("parseDigestValidator() error = %v, wantErr %v", err, tt.wantErr) + return + } + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("parseDigestValidator() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBlobDigestCalculator_saveAndRestoreState(t *testing.T) { + calculator := NewBlobDigestCalculator() + calculator.restoreState() + calculator.saveState() + calculator.restoreState() + calculator.h.Write([]byte("test1")) + sum := calculator.h.Sum(nil) + calculator.saveState() + calculator.h.Write([]byte("test2")) + 
require.NotEqual(t, sum, calculator.h.Sum(nil)) + calculator.restoreState() + require.Equal(t, sum, calculator.h.Sum(nil)) +} diff --git a/sdk/containers/azcontainerregistry/blob_client_example_test.go b/sdk/containers/azcontainerregistry/example_blob_client_test.go similarity index 62% rename from sdk/containers/azcontainerregistry/blob_client_example_test.go rename to sdk/containers/azcontainerregistry/example_blob_client_test.go index 12314bed7065..fe311b34af26 100644 --- a/sdk/containers/azcontainerregistry/blob_client_example_test.go +++ b/sdk/containers/azcontainerregistry/example_blob_client_test.go @@ -10,7 +10,11 @@ import ( "context" "fmt" "github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry" + "io" "log" + "os" + "strconv" + "strings" ) var blobClient *azcontainerregistry.BlobClient @@ -47,21 +51,68 @@ func ExampleBlobClient_DeleteBlob() { } func ExampleBlobClient_GetBlob() { - res, err := blobClient.GetBlob(context.TODO(), "prod/bash", "sha256:16463e0c481e161aabb735437d30b3c9c7391c2747cc564bb927e843b73dcb39", nil) + const digest = "sha256:16463e0c481e161aabb735437d30b3c9c7391c2747cc564bb927e843b73dcb39" + res, err := blobClient.GetBlob(context.TODO(), "prod/bash", digest, nil) if err != nil { log.Fatalf("failed to finish the request: %v", err) } - // deal with the blob io - _ = res.BlobData + reader, err := azcontainerregistry.NewDigestValidationReader(digest, res.BlobData) + if err != nil { + log.Fatalf("failed to create validation reader: %v", err) + } + f, err := os.Create("blob_file") + if err != nil { + log.Fatalf("failed to create blob file: %v", err) + } + defer f.Close() + _, err = io.Copy(f, reader) + if err != nil { + log.Fatalf("failed to write to the file: %v", err) + } } func ExampleBlobClient_GetChunk() { - res, err := blobClient.GetChunk(context.TODO(), "prod/bash", "sha256:16463e0c481e161aabb735437d30b3c9c7391c2747cc564bb927e843b73dcb39", "bytes=0-299", nil) + chunkSize := 1024 * 1024 + const digest = 
"sha256:16463e0c481e161aabb735437d30b3c9c7391c2747cc564bb927e843b73dcb39" + current := 0 + f, err := os.Create("blob_file") if err != nil { - log.Fatalf("failed to finish the request: %v", err) + log.Fatalf("failed to create blob file: %v", err) + } + defer f.Close() + for { + res, err := blobClient.GetChunk(context.TODO(), "prod/bash", digest, fmt.Sprintf("bytes=%d-%d", current, current+chunkSize-1), nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + chunk, err := io.ReadAll(res.ChunkData) + if err != nil { + log.Fatalf("failed to read the chunk: %v", err) + } + _, err = f.Write(chunk) + if err != nil { + log.Fatalf("failed to write to the file: %v", err) + } + + totalSize, _ := strconv.Atoi(strings.Split(*res.ContentRange, "/")[1]) + currentRangeEnd, _ := strconv.Atoi(strings.Split(strings.Split(*res.ContentRange, "/")[0], "-")[1]) + if totalSize == currentRangeEnd+1 { + break + } + current += chunkSize + } + _, err = f.Seek(0, io.SeekStart) + if err != nil { + log.Fatalf("failed to set to the start of the file: %v", err) + } + reader, err := azcontainerregistry.NewDigestValidationReader(digest, f) + if err != nil { + log.Fatalf("failed to create digest validation reader: %v", err) + } + _, err = io.ReadAll(reader) + if err != nil { + log.Fatalf("failed to validate digest: %v", err) } - // deal with the chunk io - _ = res.ChunkData } func ExampleBlobClient_GetUploadStatus() { diff --git a/sdk/containers/azcontainerregistry/blob_custom_client_example_test.go b/sdk/containers/azcontainerregistry/example_blob_custom_client_test.go similarity index 59% rename from sdk/containers/azcontainerregistry/blob_custom_client_example_test.go rename to sdk/containers/azcontainerregistry/example_blob_custom_client_test.go index 6707e0d99367..e7033ae07dc8 100644 --- a/sdk/containers/azcontainerregistry/blob_custom_client_example_test.go +++ b/sdk/containers/azcontainerregistry/example_blob_custom_client_test.go @@ -7,12 +7,13 @@ package 
azcontainerregistry_test import ( - "bytes" "context" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry" + "io" "log" + "os" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" ) @@ -42,9 +43,32 @@ func ExampleBlobClient_CompleteUpload() { func ExampleBlobClient_UploadChunk() { // calculator should be created when starting upload blob and passing to UploadChunk and CompleteUpload method calculator := azcontainerregistry.NewBlobDigestCalculator() - res, err := blobClient.UploadChunk(context.TODO(), "v2/blobland/blobs/uploads/2b28c60d-d296-44b7-b2b4-1f01c63195c6?_nouploadcache=false&_state=VYABvUSCNW2yY5e5VabLHppXqwU0K7cvT0YUdq57KBt7Ik5hbWUiOiJibG9ibGFuZCIsIlVVSUQiOiIyYjI4YzYwZC1kMjk2LTQ0YjctYjJiNC0xZjAxYzYzMTk1YzYiLCJPZmZzZXQiOjAsIlN0YXJ0ZWRBdCI6IjIwMTktMDgtMjdUMjM6NTI6NDcuMDUzNjU2Mjg1WiJ9", streaming.NopCloser(bytes.NewReader([]byte("U29tZXRoaW5nRWxzZQ=="))), calculator, nil) + location := "v2/blobland/blobs/uploads/2b28c60d-d296-44b7-b2b4-1f01c63195c6?_nouploadcache=false&_state=VYABvUSCNW2yY5e5VabLHppXqwU0K7cvT0YUdq57KBt7Ik5hbWUiOiJibG9ibGFuZCIsIlVVSUQiOiIyYjI4YzYwZC1kMjk2LTQ0YjctYjJiNC0xZjAxYzYzMTk1YzYiLCJPZmZzZXQiOjAsIlN0YXJ0ZWRBdCI6IjIwMTktMDgtMjdUMjM6NTI6NDcuMDUzNjU2Mjg1WiJ9" + f, err := os.Open("blob-file") if err != nil { - log.Fatalf("failed to finish the request: %v", err) + log.Fatalf("failed to read blob file: %v", err) + } + size, err := f.Seek(0, io.SeekEnd) + if err != nil { + log.Fatalf("failed to calculate blob size: %v", err) + } + chunkSize := int64(5) + current := int64(0) + for { + end := current + chunkSize + if end > size { + end = size + } + chunkReader := io.NewSectionReader(f, current, end-current) + uploadResp, err := blobClient.UploadChunk(context.TODO(), location, chunkReader, calculator, &azcontainerregistry.BlobClientUploadChunkOptions{RangeStart: to.Ptr(int32(current)), RangeEnd: to.Ptr(int32(end - 
1))}) + if err != nil { + log.Fatalf("failed to upload chunk: %v", err) + } + location = *uploadResp.Location + current = end + if current >= size { + break + } } - fmt.Printf("upload location: %s", *res.Location) + fmt.Printf("upload location: %s", location) } diff --git a/sdk/containers/azcontainerregistry/client_example_test.go b/sdk/containers/azcontainerregistry/example_client_test.go similarity index 71% rename from sdk/containers/azcontainerregistry/client_example_test.go rename to sdk/containers/azcontainerregistry/example_client_test.go index 3186fcca8acf..2e37b31cafbf 100644 --- a/sdk/containers/azcontainerregistry/client_example_test.go +++ b/sdk/containers/azcontainerregistry/example_client_test.go @@ -44,12 +44,36 @@ func ExampleClient_DeleteTag() { } } -func ExampleClient_GetManifest() { +func ExampleClient_GetManifest_tag() { res, err := client.GetManifest(context.TODO(), "hello-world-dangling", "20190628-033033z", &azcontainerregistry.ClientGetManifestOptions{Accept: to.Ptr("application/vnd.docker.distribution.manifest.v2+json")}) if err != nil { log.Fatalf("failed to finish the request: %v", err) } - manifest, err := io.ReadAll(res.ManifestData) + reader, err := azcontainerregistry.NewDigestValidationReader(*res.DockerContentDigest, res.ManifestData) + if err != nil { + log.Fatalf("failed to create validation reader: %v", err) + } + manifest, err := io.ReadAll(reader) + if err != nil { + log.Fatalf("failed to read manifest data: %v", err) + } + fmt.Printf("manifest content: %s\n", manifest) +} + +func ExampleClient_GetManifest_reference() { + const reference = "sha256:110d2b6c84592561338aa040b1b14b7ab81c2f9edbd564c2285dd7d70d777086" + res, err := client.GetManifest(context.TODO(), "nanoserver", reference, &azcontainerregistry.ClientGetManifestOptions{Accept: to.Ptr("application/vnd.docker.distribution.manifest.v2+json")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + if reference != *res.DockerContentDigest { + 
log.Fatalf("digest of the fetched manifest does not match the reference: %s", *res.DockerContentDigest) + } + reader, err := azcontainerregistry.NewDigestValidationReader(reference, res.ManifestData) + if err != nil { + log.Fatalf("failed to create validation reader: %v", err) + } + manifest, err := io.ReadAll(reader) if err != nil { log.Fatalf("failed to read manifest data: %v", err) } @@ -155,14 +179,40 @@ func ExampleClient_UpdateTagProperties() { fmt.Printf("repository namoserver - tag 4.7.2-20180905-nanoserver-1803 - 'CanWrite' property: %t\n", *res.Tag.ChangeableAttributes.CanWrite) } -func ExampleClient_UploadManifest() { - payload, err := os.Open("example-manifest.json") +func ExampleClient_UploadManifest_tag() { + f, err := os.Open("example-manifest.json") if err != nil { log.Fatalf("failed to read manifest file: %v", err) } - resp, err := client.UploadManifest(context.TODO(), "nanoserver", "test", "application/vnd.docker.distribution.manifest.v2+json", payload, nil) + resp, err := client.UploadManifest(context.TODO(), "nanoserver", "test", "application/vnd.docker.distribution.manifest.v2+json", f, nil) if err != nil { log.Fatalf("failed to upload manifest: %v", err) } - fmt.Printf("uploaded manifest digest: %s", *resp.DockerContentDigest) + _, err = f.Seek(0, io.SeekStart) + if err != nil { + log.Fatalf("failed to validate manifest digest: %v", err) + } + reader, err := azcontainerregistry.NewDigestValidationReader(*resp.DockerContentDigest, f) + if err != nil { + log.Fatalf("failed to validate manifest digest: %v", err) + } + _, err = io.ReadAll(reader) + if err != nil { + log.Fatalf("failed to validate manifest digest: %v", err) + } +} + +func ExampleClient_UploadManifest_reference() { + f, err := os.Open("example-manifest.json") + if err != nil { + log.Fatalf("failed to read manifest file: %v", err) + } + const reference = "sha256:110d2b6c84592561338aa040b1b14b7ab81c2f9edbd564c2285dd7d70d777086" + resp, err := client.UploadManifest(context.TODO(), "nanoserver", reference, 
"application/vnd.docker.distribution.manifest.v2+json", f, nil) + if err != nil { + log.Fatalf("failed to upload manifest: %v", err) + } + if *resp.DockerContentDigest != reference { + log.Fatalf("digest of the uploaded manifest does not match the reference: %s", *resp.DockerContentDigest) + } } diff --git a/sdk/containers/azcontainerregistry/custom_client_example_test.go b/sdk/containers/azcontainerregistry/example_custom_client_test.go similarity index 100% rename from sdk/containers/azcontainerregistry/custom_client_example_test.go rename to sdk/containers/azcontainerregistry/example_custom_client_test.go diff --git a/sdk/containers/azcontainerregistry/example_download_image_test.go b/sdk/containers/azcontainerregistry/example_download_image_test.go new file mode 100644 index 000000000000..bf12a59eab91 --- /dev/null +++ b/sdk/containers/azcontainerregistry/example_download_image_test.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package azcontainerregistry_test + +import ( + "context" + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry" + "io" + "log" + "os" + "strings" +) + +func Example_downloadImage() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + client, err := azcontainerregistry.NewClient("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + blobClient, err := azcontainerregistry.NewBlobClient("", cred, nil) + if err != nil { + log.Fatalf("failed to create blob client: %v", err) + } + ctx := context.Background() + + // Get manifest + manifestRes, err := client.GetManifest(ctx, "library/hello-world", "1.0.0", &azcontainerregistry.ClientGetManifestOptions{Accept: to.Ptr(string(azcontainerregistry.ContentTypeApplicationVndDockerDistributionManifestV2JSON))}) + if err != nil { + log.Fatalf("failed to get manifest: %v", err) + } + reader, err := azcontainerregistry.NewDigestValidationReader(*manifestRes.DockerContentDigest, manifestRes.ManifestData) + if err != nil { + log.Fatalf("failed to create validation reader: %v", err) + } + manifest, err := io.ReadAll(reader) + if err != nil { + log.Fatalf("failed to read manifest data: %v", err) + } + fmt.Printf("manifest: %s\n", manifest) + + // Get config + var manifestJSON map[string]any + err = json.Unmarshal(manifest, &manifestJSON) + if err != nil { + log.Fatalf("failed to unmarshal manifest: %v", err) + } + configDigest := manifestJSON["config"].(map[string]any)["digest"].(string) + configRes, err := blobClient.GetBlob(ctx, "library/hello-world", configDigest, nil) + if err != nil { + log.Fatalf("failed to get config: %v", err) + } + reader, err = azcontainerregistry.NewDigestValidationReader(configDigest, configRes.BlobData) + if err != nil { + log.Fatalf("failed 
to create validation reader: %v", err) + } + config, err := io.ReadAll(reader) + if err != nil { + log.Fatalf("failed to read config data: %v", err) + } + fmt.Printf("config: %s\n", config) + + // Get layers + layers := manifestJSON["layers"].([]any) + for _, layer := range layers { + layerDigest := layer.(map[string]any)["digest"].(string) + layerRes, err := blobClient.GetBlob(ctx, "library/hello-world", layerDigest, nil) + if err != nil { + log.Fatalf("failed to get layer: %v", err) + } + reader, err = azcontainerregistry.NewDigestValidationReader(layerDigest, layerRes.BlobData) + if err != nil { + log.Fatalf("failed to create validation reader: %v", err) + } + f, err := os.Create(strings.Split(layerDigest, ":")[1]) + if err != nil { + log.Fatalf("failed to create blob file: %v", err) + } + _, err = io.Copy(f, reader) + if err != nil { + log.Fatalf("failed to write to the file: %v", err) + } + err = f.Close() + if err != nil { + log.Fatalf("failed to close the file: %v", err) + } + } +} diff --git a/sdk/containers/azcontainerregistry/example_upload_download_blob_test.go b/sdk/containers/azcontainerregistry/example_upload_download_blob_test.go deleted file mode 100644 index a36fc19b3d13..000000000000 --- a/sdk/containers/azcontainerregistry/example_upload_download_blob_test.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package azcontainerregistry_test - -import ( - "bytes" - "context" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry" - "io" - "log" -) - -func Example_uploadAndDownloadBlob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - client, err := azcontainerregistry.NewBlobClient("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - ctx := context.Background() - blob := []byte("hello world") - startRes, err := client.StartUpload(ctx, "library/hello-world", nil) - if err != nil { - log.Fatalf("failed to start upload blob: %v", err) - } - calculator := azcontainerregistry.NewBlobDigestCalculator() - uploadResp, err := client.UploadChunk(ctx, *startRes.Location, bytes.NewReader(blob), calculator, nil) - if err != nil { - log.Fatalf("failed to upload blob: %v", err) - } - completeResp, err := client.CompleteUpload(ctx, *uploadResp.Location, calculator, nil) - if err != nil { - log.Fatalf("failed to complete upload: %v", err) - } - fmt.Printf("uploaded blob digest: %s", *completeResp.DockerContentDigest) - downloadRes, err := client.GetBlob(ctx, "library/hello-world", *completeResp.DockerContentDigest, nil) - if err != nil { - log.Fatalf("failed to download blob: %v", err) - } - downloadBlob, err := io.ReadAll(downloadRes.BlobData) - if err != nil { - log.Fatalf("failed to read blob: %v", err) - } - fmt.Printf("blob content: %s", downloadBlob) -} diff --git a/sdk/containers/azcontainerregistry/example_upload_manifest_test.go b/sdk/containers/azcontainerregistry/example_upload_image_test.go similarity index 82% rename from sdk/containers/azcontainerregistry/example_upload_manifest_test.go rename to sdk/containers/azcontainerregistry/example_upload_image_test.go index ed3e1ee50c01..da49491b2a66 100644 --- a/sdk/containers/azcontainerregistry/example_upload_manifest_test.go 
+++ b/sdk/containers/azcontainerregistry/example_upload_image_test.go @@ -16,7 +16,7 @@ import ( "log" ) -func Example_uploadManifest() { +func Example_uploadImage() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) @@ -67,24 +67,25 @@ func Example_uploadManifest() { log.Fatalf("failed to complete config upload: %v", err) } manifest := fmt.Sprintf(`{ - schemaVersion: 2, - config: { - mediaType: "application/vnd.oci.image.config.v1+json", - digest: %s, - size: %d, + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "%s", + "size": %d }, - layers: [ + "layers": [ { - mediaType: "application/vnd.oci.image.layer.v1.tar", - digest: %s, - size: %d, - annotations: { - title: "artifact.txt", - }, - }, - ], + "mediaType": "application/vnd.oci.image.layer.v1.tar", + "digest": "%s", + "size": %d, + "annotations": { + "title": "artifact.txt" + } + } + ] }`, layerDigest, len(config), *completeResp.DockerContentDigest, len(layer)) - uploadManifestRes, err := client.UploadManifest(ctx, "library/hello-world", "1.0.0", "application/vnd.oci.image.config.v1+json", streaming.NopCloser(bytes.NewReader([]byte(manifest))), nil) + uploadManifestRes, err := client.UploadManifest(ctx, "library/hello-world", "1.0.0", azcontainerregistry.ContentTypeApplicationVndDockerDistributionManifestV2JSON, streaming.NopCloser(bytes.NewReader([]byte(manifest))), nil) if err != nil { log.Fatalf("failed to upload manifest: %v", err) }