From f9d27e8c6d95a04eb7156ce2dfb0be3273051244 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miloslav=20Trma=C4=8D?= Date: Wed, 13 Mar 2024 21:58:07 +0100 Subject: [PATCH] Add digest -> specific variant, annotation data to BIC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cache implementations are recording both the base and specific compression variant; CandidateLocations2 implementations all call CandidateTemplateWithCompression to choose the appropriate variants to return based on CandidateLocations2Options. This way, neither the BIC implementations nor the transports are responsible for converting zstd:chunked entries to zstd entries if the user wants the latter. Signed-off-by: Miloslav Trmač --- copy/compression.go | 8 +- internal/blobinfocache/types.go | 21 ++-- internal/manifest/manifest.go | 5 - internal/manifest/manifest_test.go | 3 + pkg/blobinfocache/boltdb/boltdb.go | 92 +++++++++++++--- .../internal/prioritize/prioritize.go | 70 ++++++++---- .../internal/prioritize/prioritize_test.go | 103 +++++++++++++++--- pkg/blobinfocache/internal/test/test.go | 86 ++++++++++----- pkg/blobinfocache/memory/memory.go | 38 +++++-- pkg/blobinfocache/sqlite/sqlite.go | 74 +++++++++++-- 10 files changed, 387 insertions(+), 113 deletions(-) diff --git a/copy/compression.go b/copy/compression.go index 081c49312..e859d0139 100644 --- a/copy/compression.go +++ b/copy/compression.go @@ -351,7 +351,9 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about // inconsistent data to be logged. 
c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ - BaseVariantCompressor: d.uploadedCompressorName, + BaseVariantCompressor: d.uploadedCompressorName, + SpecificVariantCompressor: internalblobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }) } } @@ -361,7 +363,9 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest, // so record neither the variant name, nor the TOC digest. c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{ - BaseVariantCompressor: d.srcCompressorBaseVariantName, + BaseVariantCompressor: d.srcCompressorBaseVariantName, + SpecificVariantCompressor: internalblobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }) } return nil diff --git a/internal/blobinfocache/types.go b/internal/blobinfocache/types.go index 276c8073e..acf82ee63 100644 --- a/internal/blobinfocache/types.go +++ b/internal/blobinfocache/types.go @@ -37,8 +37,11 @@ type BlobInfoCache2 interface { // RecordDigestCompressorData records data for the blob with the specified digest. // WARNING: Only call this with LOCALLY VERIFIED data: - // - don’t record a compressor for a digest just because some remote author claims so - // (e.g. because a manifest says so); + // - don’t record a compressor for a digest just because some remote author claims so + // (e.g. because a manifest says so); + // - don’t record the non-base variant or annotations if we are not _sure_ that the base variant + // and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them + // in a manifest) // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. 
RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) @@ -52,6 +55,9 @@ type BlobInfoCache2 interface { // (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.) type DigestCompressorData struct { BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression. + // The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression: + SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant) + SpecificVariantAnnotations map[string]string // Annotations required to benefit from the specific variant. } // CandidateLocations2Options are used in CandidateLocations2. @@ -66,9 +72,10 @@ type CandidateLocations2Options struct { // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed - UnknownLocation bool // is true when `Location` for this blob is not set - Location types.BICLocationReference // not set if UnknownLocation is set to `true` + Digest digest.Digest + CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/internal/manifest/manifest.go 
b/internal/manifest/manifest.go index ee0ddc772..3fb52104a 100644 --- a/internal/manifest/manifest.go +++ b/internal/manifest/manifest.go @@ -205,11 +205,6 @@ type ReuseConditions struct { // (which can be nil to represent uncompressed or unknown) matches reuseConditions. func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool { if c.RequiredCompression != nil { - if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. - // The caller must re-compress to build those annotations. - return false - } if candidateCompression == nil || (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { return false diff --git a/internal/manifest/manifest_test.go b/internal/manifest/manifest_test.go index ae2cc32ca..ebb3ef559 100644 --- a/internal/manifest/manifest_test.go +++ b/internal/manifest/manifest_test.go @@ -192,6 +192,9 @@ func TestCandidateCompressionMatchesReuseConditions(t *testing.T) { }{ // RequiredCompression restrictions {&compression.Zstd, nil, &compression.Zstd, true}, + {&compression.Zstd, nil, &compression.ZstdChunked, true}, + {&compression.ZstdChunked, nil, &compression.Zstd, false}, + {&compression.ZstdChunked, nil, &compression.ZstdChunked, true}, {&compression.Gzip, nil, &compression.Zstd, false}, {&compression.Zstd, nil, nil, false}, {nil, nil, &compression.Zstd, true}, diff --git a/pkg/blobinfocache/boltdb/boltdb.go b/pkg/blobinfocache/boltdb/boltdb.go index 036d2c014..2d4137ffd 100644 --- a/pkg/blobinfocache/boltdb/boltdb.go +++ b/pkg/blobinfocache/boltdb/boltdb.go @@ -2,6 +2,8 @@ package boltdb import ( + "bytes" + "encoding/json" "errors" "fmt" "io/fs" @@ -30,6 +32,10 @@ var ( // digestCompressorBucket stores a mapping from any digest to a 
compressor, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression). // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present. digestCompressorBucket = []byte("digestCompressor") + // digestSpecificVariantCompressorBucket stores a mapping from any digest to a (compressor, NUL byte, annotations as JSON), valid + // only if digestCompressorBucket contains a value. The compressor is not `UnknownCompression`. + digestSpecificVariantCompressorBucket = []byte("digestSpecificVariantCompressor") + // It may not exist in caches created by older versions, even if digestCompressorBucket is present. // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest // (as a set of key=digest, value="" pairs) digestByUncompressedBucket = []byte("digestByUncompressed") @@ -299,25 +305,68 @@ func (bdc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompresse // WARNING: Only call this with LOCALLY VERIFIED data: // - don’t record a compressor for a digest just because some remote author claims so // (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) // // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. 
func (bdc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { _ = bdc.update(func(tx *bolt.Tx) error { + key := []byte(anyDigest.String()) + b, err := tx.CreateBucketIfNotExists(digestCompressorBucket) if err != nil { return err } - key := []byte(anyDigest.String()) + warned := false if previousBytes := b.Get(key); previousBytes != nil { if string(previousBytes) != data.BaseVariantCompressor { logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), data.BaseVariantCompressor) + warned = true } } if data.BaseVariantCompressor == blobinfocache.UnknownCompression { - return b.Delete(key) + if err := b.Delete(key); err != nil { + return err + } + if b := tx.Bucket(digestSpecificVariantCompressorBucket); b != nil { + if err := b.Delete(key); err != nil { + return err + } + } } - return b.Put(key, []byte(data.BaseVariantCompressor)) + if err := b.Put(key, []byte(data.BaseVariantCompressor)); err != nil { + return err + } + + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + b, err := tx.CreateBucketIfNotExists(digestSpecificVariantCompressorBucket) + if err != nil { + return err + } + if !warned { // Don’t warn twice about the same digest + if previousBytes := b.Get(key); previousBytes != nil { + if prevSVCBytes, _, ok := bytes.Cut(previousBytes, []byte{0}); ok { + prevSVC := string(prevSVCBytes) + if data.SpecificVariantCompressor != prevSVC { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor) + } + } + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return err + } + data := bytes.Clone([]byte(data.SpecificVariantCompressor)) + data = append(data, 0) + data = append(data, annotations...) + if err := b.Put(key, data); err != nil { + return err + } + } + return nil }) // FIXME? 
Log error (but throttle the log volume on repeated accesses)? } @@ -354,24 +403,36 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket // (which might be nil) with corresponding compression -// info from compressionBucket (which might be nil), and returns the result of appending them +// info from compressionBucket and specificVariantCompresssionBucket (which might be nil), and returns the result of appending them // to candidates. // v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates // with unknown compression. -func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, - v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { +func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket, specificVariantCompresssionBucket *bolt.Bucket, + digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { digestKey := []byte(digest.String()) - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if compressionBucket != nil { // the bucket won't exist if the cache was created by a v1 implementation and // hasn't yet been updated by a v2 implementation if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 { - compressorName = string(compressorNameValue) + compressionData.BaseVariantCompressor = string(compressorNameValue) + } + if specificVariantCompresssionBucket != nil { + if 
svcData := specificVariantCompresssionBucket.Get(digestKey); svcData != nil { + if compressorBytes, annotationBytes, ok := bytes.Cut(svcData, []byte{0}); ok { + compressionData.SpecificVariantCompressor = string(compressorBytes) + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return candidates // FIXME? Log error (but throttle the log volume on repeated accesses)? + } + } + } } } - template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressorName, - }) + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) if template == nil { return candidates } @@ -416,11 +477,12 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types if scopeBucket != nil { scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) } - // compressionBucket won't have been created if previous writers never recorded info about compression, + // compressionBucket and svCompressionBucket won't have been created if previous writers never recorded info about compression, // and we don't want to fail just because of that compressionBucket := tx.Bucket(digestCompressorBucket) + specificVariantCompressionBucket := tx.Bucket(digestSpecificVariantCompressorBucket) - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, v2Options) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, specificVariantCompressionBucket, primaryDigest, v2Options) if canSubstitute { if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { b := tx.Bucket(digestByUncompressedBucket) @@ -433,7 +495,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types return err } if d != primaryDigest && d != uncompressedDigestValue { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, 
d, v2Options) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, specificVariantCompressionBucket, d, v2Options) } return nil }); err != nil { @@ -442,7 +504,7 @@ func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types } } if uncompressedDigestValue != primaryDigest { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, v2Options) + res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, specificVariantCompressionBucket, uncompressedDigestValue, v2Options) } } } diff --git a/pkg/blobinfocache/internal/prioritize/prioritize.go b/pkg/blobinfocache/internal/prioritize/prioritize.go index 03548209f..40ed09bed 100644 --- a/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -28,9 +28,10 @@ const replacementUnknownLocationAttempts = 2 // CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest, // which can be later combined with information about a location. type CandidateTemplate struct { - digest digest.Digest - compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + digest digest.Digest + compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + compressionAnnotations map[string]string // If necessary, annotations necessary to use compressionAlgorithm } // CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable @@ -40,7 +41,7 @@ type CandidateTemplate struct { // if not nil, the call is assumed to be CandidateLocations2. 
func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate { if v2Options == nil { - return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm values are not used. + return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used. digest: digest, } } @@ -60,14 +61,40 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation return nil } return &CandidateTemplate{ - digest: digest, - compressionOperation: types.Decompress, - compressionAlgorithm: nil, + digest: digest, + compressionOperation: types.Decompress, + compressionAlgorithm: nil, + compressionAnnotations: nil, } case blobinfocache.UnknownCompression: logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String()) return nil // Not allowed with CandidateLocations2 default: + // See if we can use the specific variant, first. 
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor) + if err != nil { + logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v", + data.SpecificVariantCompressor, digest.String(), err) + } else { + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v", + data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + } else { + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: data.SpecificVariantAnnotations, + } + } + } + } + + // Try the base variant. 
algo, err := compression.AlgorithmByName(data.BaseVariantCompressor) if err != nil { logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", @@ -83,9 +110,10 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation return nil } return &CandidateTemplate{ - digest: digest, - compressionOperation: types.Compress, - compressionAlgorithm: &algo, + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: nil, } } } @@ -100,11 +128,12 @@ type CandidateWithTime struct { func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime { return CandidateWithTime{ candidate: blobinfocache.BICReplacementCandidate2{ - Digest: template.digest, - CompressionOperation: template.compressionOperation, - CompressionAlgorithm: template.compressionAlgorithm, - UnknownLocation: false, - Location: location, + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: false, + Location: location, }, lastSeen: lastSeen, } @@ -114,11 +143,12 @@ func (template CandidateTemplate) CandidateWithLocation(location types.BICLocati func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime { return CandidateWithTime{ candidate: blobinfocache.BICReplacementCandidate2{ - Digest: template.digest, - CompressionOperation: template.compressionOperation, - CompressionAlgorithm: template.compressionAlgorithm, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: true, + Location: 
types.BICLocationReference{Opaque: ""}, }, lastSeen: time.Time{}, } diff --git a/pkg/blobinfocache/internal/prioritize/prioritize_test.go b/pkg/blobinfocache/internal/prioritize/prioritize_test.go index 0d9fe12c1..a344d7e27 100644 --- a/pkg/blobinfocache/internal/prioritize/prioritize_test.go +++ b/pkg/blobinfocache/internal/prioritize/prioritize_test.go @@ -56,14 +56,26 @@ var ( ) func TestCandidateTemplateWithCompression(t *testing.T) { + chunkedAnnotations := map[string]string{"a": "b"} uncompressedData := blobinfocache.DigestCompressorData{ - BaseVariantCompressor: blobinfocache.Uncompressed, + BaseVariantCompressor: blobinfocache.Uncompressed, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, } gzipData := blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressiontypes.GzipAlgorithmName, + BaseVariantCompressor: compressiontypes.GzipAlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, } zstdData := blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } + zstdChunkedData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: compressiontypes.ZstdChunkedAlgorithmName, + SpecificVariantAnnotations: chunkedAnnotations, } for _, c := range []struct { @@ -72,14 +84,17 @@ func TestCandidateTemplateWithCompression(t *testing.T) { data blobinfocache.DigestCompressorData v2Matches bool // if v2Matches: - v2Op types.LayerCompression - v2Algo string + v2Op types.LayerCompression + v2Algo string + v2Annotations map[string]string }{ { name: "unknown", requiredCompression: nil, data: blobinfocache.DigestCompressorData{ - BaseVariantCompressor: blobinfocache.UnknownCompression, + 
BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }, v2Matches: false, }, @@ -90,6 +105,7 @@ func TestCandidateTemplateWithCompression(t *testing.T) { v2Matches: true, v2Op: types.Decompress, v2Algo: "", + v2Annotations: nil, }, { name: "uncompressed, want gzip", @@ -104,6 +120,7 @@ func TestCandidateTemplateWithCompression(t *testing.T) { v2Matches: true, v2Op: types.Compress, v2Algo: compressiontypes.GzipAlgorithmName, + v2Annotations: nil, }, { name: "gzip, want zstd", @@ -115,7 +132,9 @@ func TestCandidateTemplateWithCompression(t *testing.T) { name: "unknown base", requiredCompression: nil, data: blobinfocache.DigestCompressorData{ - BaseVariantCompressor: "this value is unknown", + BaseVariantCompressor: "this value is unknown", + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }, v2Matches: false, }, @@ -126,6 +145,7 @@ func TestCandidateTemplateWithCompression(t *testing.T) { v2Matches: true, v2Op: types.Compress, v2Algo: compressiontypes.ZstdAlgorithmName, + v2Annotations: nil, }, { name: "zstd, want gzip", @@ -140,6 +160,7 @@ func TestCandidateTemplateWithCompression(t *testing.T) { v2Matches: true, v2Op: types.Compress, v2Algo: compressiontypes.ZstdAlgorithmName, + v2Annotations: nil, }, { name: "zstd, want zstd:chunked", @@ -147,12 +168,59 @@ func TestCandidateTemplateWithCompression(t *testing.T) { data: zstdData, v2Matches: false, }, + { + name: "zstd:chunked", + requiredCompression: nil, + data: zstdChunkedData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdChunkedAlgorithmName, + v2Annotations: chunkedAnnotations, + }, + { + name: "zstd:chunked, want gzip", + requiredCompression: &compression.Gzip, + data: zstdChunkedData, + v2Matches: false, + }, + { + name: "zstd:chunked, want zstd", // Note that we return the full chunked data in this case. 
+ requiredCompression: &compression.Zstd, + data: zstdChunkedData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdChunkedAlgorithmName, + v2Annotations: chunkedAnnotations, + }, + { + name: "zstd:chunked, want zstd:chunked", + requiredCompression: &compression.ZstdChunked, + data: zstdChunkedData, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdChunkedAlgorithmName, + v2Annotations: chunkedAnnotations, + }, + { + name: "zstd:unknown", + requiredCompression: nil, + data: blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: "this value is unknown", + SpecificVariantAnnotations: chunkedAnnotations, + }, + v2Matches: true, + v2Op: types.Compress, + v2Algo: compressiontypes.ZstdAlgorithmName, + v2Annotations: nil, + }, } { res := CandidateTemplateWithCompression(nil, digestCompressedPrimary, c.data) assert.Equal(t, &CandidateTemplate{ - digest: digestCompressedPrimary, - compressionOperation: types.PreserveOriginal, - compressionAlgorithm: nil, + digest: digestCompressedPrimary, + compressionOperation: types.PreserveOriginal, + compressionAlgorithm: nil, + compressionAnnotations: nil, }, res, c.name) // These tests only use RequiredCompression in CandidateLocations2Options for clarity; @@ -172,13 +240,16 @@ func TestCandidateTemplateWithCompression(t *testing.T) { require.NotNil(t, res.compressionAlgorithm, c.name) assert.Equal(t, c.v2Algo, res.compressionAlgorithm.Name()) } + assert.Equal(t, c.v2Annotations, res.compressionAnnotations, c.name) } } } func TestCandidateWithLocation(t *testing.T) { template := CandidateTemplateWithCompression(&blobinfocache.CandidateLocations2Options{}, digestCompressedPrimary, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: compressiontypes.ZstdChunkedAlgorithmName, + 
SpecificVariantAnnotations: map[string]string{"a": "b"}, }) require.NotNil(t, template) loc := types.BICLocationReference{Opaque: "opaque"} @@ -186,7 +257,8 @@ func TestCandidateWithLocation(t *testing.T) { res := template.CandidateWithLocation(loc, time) assert.Equal(t, digestCompressedPrimary, res.candidate.Digest) assert.Equal(t, types.Compress, res.candidate.CompressionOperation) - assert.Equal(t, compressiontypes.ZstdAlgorithmName, res.candidate.CompressionAlgorithm.Name()) + assert.Equal(t, compressiontypes.ZstdChunkedAlgorithmName, res.candidate.CompressionAlgorithm.Name()) + assert.Equal(t, map[string]string{"a": "b"}, res.candidate.CompressionAnnotations) assert.Equal(t, false, res.candidate.UnknownLocation) assert.Equal(t, loc, res.candidate.Location) assert.Equal(t, time, res.lastSeen) @@ -194,13 +266,16 @@ func TestCandidateWithLocation(t *testing.T) { func TestCandidateWithUnknownLocation(t *testing.T) { template := CandidateTemplateWithCompression(&blobinfocache.CandidateLocations2Options{}, digestCompressedPrimary, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: compressiontypes.ZstdChunkedAlgorithmName, + SpecificVariantAnnotations: map[string]string{"a": "b"}, }) require.NotNil(t, template) res := template.CandidateWithUnknownLocation() assert.Equal(t, digestCompressedPrimary, res.candidate.Digest) assert.Equal(t, types.Compress, res.candidate.CompressionOperation) - assert.Equal(t, compressiontypes.ZstdAlgorithmName, res.candidate.CompressionAlgorithm.Name()) + assert.Equal(t, compressiontypes.ZstdChunkedAlgorithmName, res.candidate.CompressionAlgorithm.Name()) + assert.Equal(t, map[string]string{"a": "b"}, res.candidate.CompressionAnnotations) assert.Equal(t, true, res.candidate.UnknownLocation) } diff --git a/pkg/blobinfocache/internal/test/test.go b/pkg/blobinfocache/internal/test/test.go index 
9ad471a64..cb4a4e6b0 100644 --- a/pkg/blobinfocache/internal/test/test.go +++ b/pkg/blobinfocache/internal/test/test.go @@ -25,7 +25,7 @@ const ( compressorNameU = blobinfocache.Uncompressed compressorNameA = compressiontypes.GzipAlgorithmName compressorNameB = compressiontypes.ZstdAlgorithmName - compressorNameCU = compressiontypes.ZstdChunkedAlgorithmName + compressorNameCU = compressiontypes.XzAlgorithmName digestUnknownLocation = digest.Digest("sha256:7777777777777777777777777777777777777777777777777777777777777777") digestFilteringUncompressed = digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") @@ -150,6 +150,7 @@ func testGenericRecordKnownLocations(t *testing.T, cache blobinfocache.BlobInfoC type candidate struct { d digest.Digest cn string + ca map[string]string lr string } @@ -194,11 +195,12 @@ func assertCandidatesMatch2(t *testing.T, scopeName string, expected []candidate algo = &algo_ } e[i] = blobinfocache.BICReplacementCandidate2{ - Digest: ev.d, - CompressionOperation: op, - CompressionAlgorithm: algo, - UnknownLocation: false, - Location: types.BICLocationReference{Opaque: scopeName + ev.lr}, + Digest: ev.d, + CompressionOperation: op, + CompressionAlgorithm: algo, + CompressionAnnotations: ev.ca, + UnknownLocation: false, + Location: types.BICLocationReference{Opaque: scopeName + ev.lr}, } } assertCandidatesMatch2Native(t, e, actual) @@ -283,14 +285,16 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa {"B", digestCompressedB, compressorNameB}, {"CU", digestCompressedUnrelated, compressorNameCU}, } + chunkedAnnotations := map[string]string{"a": "b"} digestNameSetFiltering := []struct { // Used primarily to test filtering in CandidateLocations2Options - n string - d digest.Digest - m string + n string + d digest.Digest + base, specific string + annotations map[string]string }{ - {"gzip", digestGzip, compressiontypes.GzipAlgorithmName}, - {"zstd", digestZstd, 
compressiontypes.ZstdAlgorithmName}, - {"zstdChunked", digestZstdChunked, compressiontypes.ZstdChunkedAlgorithmName}, + {"gzip", digestGzip, compressiontypes.GzipAlgorithmName, blobinfocache.UnknownCompression, nil}, + {"zstd", digestZstd, compressiontypes.ZstdAlgorithmName, blobinfocache.UnknownCompression, nil}, + {"zstdChunked", digestZstdChunked, compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName, chunkedAnnotations}, } for _, e := range digestNameSetFiltering { // digestFilteringUncompressed exists only to allow the three entries to be considered as candidates cache.RecordDigestUncompressedPair(e.d, digestFilteringUncompressed) @@ -315,7 +319,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa // If a record exists with compression without Location then // then return a record without location and with `UnknownLocation: true` cache.RecordDigestCompressorData(digestUnknownLocation, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressiontypes.Bzip2AlgorithmName, + BaseVariantCompressor: compressiontypes.Bzip2AlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }) res = cache.CandidateLocations2(transport, scope, digestUnknownLocation, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, @@ -358,7 +364,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa if scopeIndex != 0 { for _, e := range digestNameSetPrioritization { cache.RecordDigestCompressorData(e.d, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: blobinfocache.UnknownCompression, + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }) } } @@ -428,7 +436,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa // Set the "known" compression values for _, e := range 
digestNameSetPrioritization { cache.RecordDigestCompressorData(e.d, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: e.m, + BaseVariantCompressor: e.m, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, }) } @@ -512,7 +522,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa } for _, e := range digestNameSetFiltering { cache.RecordDigestCompressorData(e.d, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: e.m, + BaseVariantCompressor: e.base, + SpecificVariantCompressor: e.specific, + SpecificVariantAnnotations: e.annotations, }) } @@ -521,9 +533,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa CanSubstitute: true, }) assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order - {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"}, - {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"}, - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) // Manifest format filters @@ -532,16 +544,16 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa PossibleManifestFormats: []string{manifest.DockerV2Schema2MediaType}, }) assertCandidatesMatch2(t, scopeName, []candidate{ - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, PossibleManifestFormats: 
[]string{imgspecv1.MediaTypeImageManifest}, }) assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order - {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"}, - {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"}, - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) // Compression algorithm filters @@ -550,21 +562,37 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa RequiredCompression: &compression.Gzip, }) assertCandidatesMatch2(t, scopeName, []candidate{ - {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, lr: "gzip"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, RequiredCompression: &compression.ZstdChunked, }) - // Right now, zstd:chunked requests never match a candidate, see CandidateCompressionMatchesReuseConditions(). - assertCandidatesMatch2(t, scopeName, []candidate{}, res) + assertCandidatesMatch2(t, scopeName, []candidate{ + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + }, res) res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ CanSubstitute: true, RequiredCompression: &compression.Zstd, }) - assertCandidatesMatch2(t, scopeName, []candidate{ // When the user asks for zstd, zstd:chunked candidates are also acceptable. 
- {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"}, - {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, lr: "zstd"}, + assertCandidatesMatch2(t, scopeName, []candidate{ // When the user asks for zstd, zstd:chunked candidates are also acceptable, and include the chunked information. + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + }, res) + + // After RecordDigestCompressorData with zstd:chunked details, a later call with zstd-only does not drop the chunked details. + cache.RecordDigestCompressorData(digestZstdChunked, blobinfocache.DigestCompressorData{ + BaseVariantCompressor: compressiontypes.ZstdAlgorithmName, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) + res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{ + CanSubstitute: true, + }) + assertCandidatesMatch2(t, scopeName, []candidate{ // Sorted in the reverse of digestNameSetFiltering order + {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, ca: chunkedAnnotations, lr: "zstdChunked"}, + {d: digestZstd, cn: compressiontypes.ZstdAlgorithmName, ca: nil, lr: "zstd"}, + {d: digestGzip, cn: compressiontypes.GzipAlgorithmName, ca: nil, lr: "gzip"}, }, res) } } diff --git a/pkg/blobinfocache/memory/memory.go b/pkg/blobinfocache/memory/memory.go index 067c6b7e1..9d4125d66 100644 --- a/pkg/blobinfocache/memory/memory.go +++ b/pkg/blobinfocache/memory/memory.go @@ -28,7 +28,7 @@ type cache struct { uncompressedDigestsByTOC map[digest.Digest]digest.Digest digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each 
location reference - compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest + compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression } // New returns a BlobInfoCache implementation which is in-memory only. @@ -49,7 +49,7 @@ func new2() *cache { uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{}, digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, - compressors: map[digest.Digest]string{}, + compressors: map[digest.Digest]blobinfocache.DigestCompressorData{}, } } @@ -148,20 +148,36 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type // WARNING: Only call this with LOCALLY VERIFIED data: // - don’t record a compressor for a digest just because some remote author claims so // (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) // // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. 
func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { mem.mutex.Lock() defer mem.mutex.Unlock() - if previous, ok := mem.compressors[anyDigest]; ok && previous != data.BaseVariantCompressor { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor) + if previous, ok := mem.compressors[anyDigest]; ok { + if previous.BaseVariantCompressor != data.BaseVariantCompressor { + logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor) + } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != data.SpecificVariantCompressor { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor) + } + // We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic. + + // Preserve specific variant information if the incoming data does not have it. 
+ if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != blobinfocache.UnknownCompression { + data.SpecificVariantCompressor = previous.SpecificVariantCompressor + data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations + } } if data.BaseVariantCompressor == blobinfocache.UnknownCompression { delete(mem.compressors, anyDigest) return } - mem.compressors[anyDigest] = data.BaseVariantCompressor + mem.compressors[anyDigest] = data } // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory @@ -171,13 +187,15 @@ func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi // with unknown compression. func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if v, ok := mem.compressors[digest]; ok { - compressorName = v + compressionData = v } - template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressorName, - }) + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) if template == nil { return candidates } diff --git a/pkg/blobinfocache/sqlite/sqlite.go b/pkg/blobinfocache/sqlite/sqlite.go index 9d000a5c7..1a7931023 100644 --- a/pkg/blobinfocache/sqlite/sqlite.go +++ b/pkg/blobinfocache/sqlite/sqlite.go @@ -3,6 +3,7 @@ package sqlite import ( "database/sql" + "encoding/json" "errors" 
"fmt" "sync" @@ -303,6 +304,16 @@ func ensureDBHasCurrentSchema(db *sql.DB) error { `uncompressedDigest TEXT NOT NULL )`, }, + { + "DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors. + `CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` + + // index implied by PRIMARY KEY + `digest TEXT PRIMARY KEY NOT NULL,` + + // The compressor is not `UnknownCompression`. + `specificVariantCompressor TEXT NOT NULL, + specificVariantAnnotations BLOB NOT NULL + )`, + }, } _, err := dbTransaction(db, func(tx *sql.Tx) (void, error) { @@ -461,6 +472,9 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type // WARNING: Only call this with LOCALLY VERIFIED data: // - don’t record a compressor for a digest just because some remote author claims so // (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) // // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. 
@@ -468,21 +482,46 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String()) if err != nil { - return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest) + return void{}, fmt.Errorf("looking for compressor of %q", anyDigest) } + warned := false if gotPrevious && previous != data.BaseVariantCompressor { logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor) + warned = true } if data.BaseVariantCompressor == blobinfocache.UnknownCompression { if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil { return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err) } + if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil { + return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err) + } } else { if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)", anyDigest.String(), data.BaseVariantCompressor); err != nil { return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err) } } + + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + if !warned { // Don’t warn twice about the same digest + prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest) + } + if found && data.SpecificVariantCompressor != prevSVC { + logrus.Warnf("Specific compressor for blob with digest %s previously 
recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor) + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return void{}, err + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)", + anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil { + return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err) + } + } return void{}, nil }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } @@ -493,19 +532,32 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi // with unknown compression. func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) { - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if v2Options != nil { - compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) - if err != nil { - return nil, fmt.Errorf("scanning compressorName: %w", err) - } - if found { - compressorName = compressor + var baseVariantCompressor string + var specificVariantCompressor sql.NullString + var annotationBytes []byte + switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+ + "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()). 
+ Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); { + case errors.Is(err, sql.ErrNoRows): // Do nothing + case err != nil: + return nil, fmt.Errorf("scanning compressor data: %w", err) + default: + compressionData.BaseVariantCompressor = baseVariantCompressor + if specificVariantCompressor.Valid && annotationBytes != nil { + compressionData.SpecificVariantCompressor = specificVariantCompressor.String + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return nil, err + } + } } } - template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{ - BaseVariantCompressor: compressorName, - }) + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) if template == nil { return candidates, nil }