Commit c7233ee

Merge remote-tracking branch 'upstream/main' into HEAD
2 parents: 5af61e0 + 8c7c58c

34 files changed: +1369 -492 lines

Diff for: .codespellrc (+4 -4)
@@ -1,6 +1,6 @@
 # See https://github.com/codespell-project/codespell#using-a-config-file
 [codespell]
-skip = .git,*.pdf,*.svg,.codespellrc,go.sum,system_registries_v2_test.go,Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej,*.gpg
-check-hidden = true
-ignore-regex = \b(isT|BU|this/doesnt:match)\b
-ignore-words-list = te,pathc
+skip = ./vendor,./.git,./go.sum,*.gpg
+
+# NOTE words added to the list below need to be lowercased.
+ignore-words-list = te,bu

Diff for: copy/compression.go (+98 -63)
@@ -11,6 +11,7 @@ import (
     "github.com/containers/image/v5/pkg/compression"
     compressiontypes "github.com/containers/image/v5/pkg/compression/types"
     "github.com/containers/image/v5/types"
+    chunkedToc "github.com/containers/storage/pkg/chunked/toc"
     imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
     "github.com/sirupsen/logrus"
 )
@@ -34,10 +35,10 @@ var (
 
 // bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
 type bpDetectCompressionStepData struct {
-    isCompressed      bool
-    format            compressiontypes.Algorithm        // Valid if isCompressed
-    decompressor      compressiontypes.DecompressorFunc // Valid if isCompressed
-    srcCompressorName string                            // Compressor name to possibly record in the blob info cache for the source blob.
+    isCompressed                 bool
+    format                       compressiontypes.Algorithm        // Valid if isCompressed
+    decompressor                 compressiontypes.DecompressorFunc // Valid if isCompressed
+    srcCompressorBaseVariantName string                            // Compressor name to possibly record in the blob info cache for the source blob.
 }
 
 // blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
@@ -51,15 +52,25 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
     }
     stream.reader = reader
 
+    if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
+        tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+        if err != nil {
+            return bpDetectCompressionStepData{}, err
+        }
+        if tocDigest != nil {
+            format = compression.ZstdChunked
+        }
+
+    }
     res := bpDetectCompressionStepData{
         isCompressed: decompressor != nil,
         format:       format,
         decompressor: decompressor,
     }
     if res.isCompressed {
-        res.srcCompressorName = format.Name()
+        res.srcCompressorBaseVariantName = format.BaseVariantName()
     } else {
-        res.srcCompressorName = internalblobinfocache.Uncompressed
+        res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed
     }
 
     if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
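
Plain zstd and zstd:chunked layers look the same to the stream-prefix sniffer, so the hunk above tells them apart by checking the layer annotations for a TOC digest and, when one is present, reporting the format as zstd:chunked. Below is a minimal standalone sketch of that classification, reusing chunkedToc.GetTOCDigest and compression.ZstdChunked from the diff; the classifyZstdLayer helper and the main wrapper are illustrative only and are not part of the repository.

package main

import (
    "fmt"

    "github.com/containers/image/v5/pkg/compression"
    compressiontypes "github.com/containers/image/v5/pkg/compression/types"
    chunkedToc "github.com/containers/storage/pkg/chunked/toc"
)

// classifyZstdLayer (hypothetical helper) returns the algorithm to report for a detected layer:
// unchanged for anything that is not zstd, and zstd:chunked when the annotations carry a TOC digest.
func classifyZstdLayer(detected compressiontypes.Algorithm, annotations map[string]string) (compressiontypes.Algorithm, error) {
    if detected.Name() != compressiontypes.ZstdAlgorithmName {
        return detected, nil // Only zstd streams can turn out to be zstd:chunked.
    }
    tocDigest, err := chunkedToc.GetTOCDigest(annotations)
    if err != nil {
        return detected, err // Malformed TOC annotation.
    }
    if tocDigest != nil {
        return compression.ZstdChunked, nil // A TOC is present: report the chunked variant.
    }
    return detected, nil
}

func main() {
    algo, err := classifyZstdLayer(compression.Zstd, map[string]string{})
    if err != nil {
        panic(err)
    }
    fmt.Println(algo.Name()) // Prints "zstd" when no TOC annotation is present.
}
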
@@ -70,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 
 // bpCompressionStepData contains data that the copy pipeline needs about the compression step.
 type bpCompressionStepData struct {
-    operation              bpcOperation                // What we are actually doing
-    uploadedOperation      types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
-    uploadedAlgorithm      *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
-    uploadedAnnotations    map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
-    srcCompressorName      string                      // Compressor name to record in the blob info cache for the source blob.
-    uploadedCompressorName string                      // Compressor name to record in the blob info cache for the uploaded blob.
-    closers                []io.Closer                 // Objects to close after the upload is done, if any.
+    operation                             bpcOperation                // What we are actually doing
+    uploadedOperation                     types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
+    uploadedAlgorithm                     *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+    uploadedAnnotations                   map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+    srcCompressorBaseVariantName          string                      // Compressor base variant name to record in the blob info cache for the source blob.
+    uploadedCompressorBaseVariantName     string                      // Compressor base variant name to record in the blob info cache for the uploaded blob.
+    uploadedCompressorSpecificVariantName string                      // Compressor specific variant name to record in the blob info cache for the uploaded blob.
+    closers                               []io.Closer                 // Objects to close after the upload is done, if any.
 }
 
 type bpcOperation int
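
The renamed fields above depend on the distinction between an algorithm's specific name and its base variant name: zstd:chunked is a specific variant whose base variant is plain zstd, while gzip has no separate variant, so the two names coincide and the pipeline then records the specific variant as unknown (see the bpcCompressUncompressed hunk further down). A small sketch of that relationship follows; the printed name strings are my expectation, not something stated in this diff.

package main

import (
    "fmt"

    "github.com/containers/image/v5/pkg/compression"
)

func main() {
    // For zstd:chunked the specific name and the base variant name differ.
    fmt.Println(compression.ZstdChunked.Name())            // expected: "zstd:chunked"
    fmt.Println(compression.ZstdChunked.BaseVariantName()) // expected: "zstd"

    // gzip has no TOC-dependent variant, so both names are the same;
    // in that case the copy pipeline stores the specific variant as unknown.
    fmt.Println(compression.Gzip.Name() == compression.Gzip.BaseVariantName()) // expected: true
}
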
@@ -128,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp
         // We can’t do anything with an encrypted blob unless decrypted.
         logrus.Debugf("Using original blob without modification for encrypted blob")
         return &bpCompressionStepData{
-            operation:              bpcOpPreserveOpaque,
-            uploadedOperation:      types.PreserveOriginal,
-            uploadedAlgorithm:      nil,
-            srcCompressorName:      internalblobinfocache.UnknownCompression,
-            uploadedCompressorName: internalblobinfocache.UnknownCompression,
+            operation:                             bpcOpPreserveOpaque,
+            uploadedOperation:                     types.PreserveOriginal,
+            uploadedAlgorithm:                     nil,
+            srcCompressorBaseVariantName:          internalblobinfocache.UnknownCompression,
+            uploadedCompressorBaseVariantName:     internalblobinfocache.UnknownCompression,
+            uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
         }, nil
     }
     return nil, nil
@@ -156,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
             Digest: "",
             Size:   -1,
         }
+        specificVariantName := uploadedAlgorithm.Name()
+        if specificVariantName == uploadedAlgorithm.BaseVariantName() {
+            specificVariantName = internalblobinfocache.UnknownCompression
+        }
         return &bpCompressionStepData{
-            operation:               bpcOpCompressUncompressed,
-            uploadedOperation:       types.Compress,
-            uploadedAlgorithm:       uploadedAlgorithm,
-            uploadedAnnotations:     annotations,
-            srcCompressorName:       detected.srcCompressorName,
-            uploadedCompressorName:  uploadedAlgorithm.Name(),
-            closers:                 []io.Closer{reader},
+            operation:                             bpcOpCompressUncompressed,
+            uploadedOperation:                     types.Compress,
+            uploadedAlgorithm:                     uploadedAlgorithm,
+            uploadedAnnotations:                   annotations,
+            srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+            uploadedCompressorBaseVariantName:     uploadedAlgorithm.BaseVariantName(),
+            uploadedCompressorSpecificVariantName: specificVariantName,
+            closers:                               []io.Closer{reader},
         }, nil
     }
     return nil, nil
@@ -196,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
             Digest: "",
             Size:   -1,
         }
+        specificVariantName := ic.compressionFormat.Name()
+        if specificVariantName == ic.compressionFormat.BaseVariantName() {
+            specificVariantName = internalblobinfocache.UnknownCompression
+        }
         succeeded = true
         return &bpCompressionStepData{
-            operation:               bpcOpRecompressCompressed,
-            uploadedOperation:       types.PreserveOriginal,
-            uploadedAlgorithm:       ic.compressionFormat,
-            uploadedAnnotations:     annotations,
-            srcCompressorName:       detected.srcCompressorName,
-            uploadedCompressorName:  ic.compressionFormat.Name(),
-            closers:                 []io.Closer{decompressed, recompressed},
+            operation:                             bpcOpRecompressCompressed,
+            uploadedOperation:                     types.PreserveOriginal,
+            uploadedAlgorithm:                     ic.compressionFormat,
+            uploadedAnnotations:                   annotations,
+            srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+            uploadedCompressorBaseVariantName:     ic.compressionFormat.BaseVariantName(),
+            uploadedCompressorSpecificVariantName: specificVariantName,
+            closers:                               []io.Closer{decompressed, recompressed},
         }, nil
     }
     return nil, nil
@@ -225,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
             Size:   -1,
         }
         return &bpCompressionStepData{
-            operation:               bpcOpDecompressCompressed,
-            uploadedOperation:       types.Decompress,
-            uploadedAlgorithm:       nil,
-            srcCompressorName:       detected.srcCompressorName,
-            uploadedCompressorName:  internalblobinfocache.Uncompressed,
-            closers:                 []io.Closer{s},
+            operation:                             bpcOpDecompressCompressed,
+            uploadedOperation:                     types.Decompress,
+            uploadedAlgorithm:                     nil,
+            srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+            uploadedCompressorBaseVariantName:     internalblobinfocache.Uncompressed,
+            uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
+            closers:                               []io.Closer{s},
         }, nil
     }
     return nil, nil
@@ -268,11 +292,15 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
         algorithm = nil
     }
     return &bpCompressionStepData{
-        operation:              bpcOp,
-        uploadedOperation:      uploadedOp,
-        uploadedAlgorithm:      algorithm,
-        srcCompressorName:      detected.srcCompressorName,
-        uploadedCompressorName: detected.srcCompressorName,
+        operation:                    bpcOp,
+        uploadedOperation:            uploadedOp,
+        uploadedAlgorithm:            algorithm,
+        srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+        // We only record the base variant of the format on upload; we didn’t do anything with
+        // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger
+        // reuse of any kind between the blob digest and the TOC digest.
+        uploadedCompressorBaseVariantName:     detected.srcCompressorBaseVariantName,
+        uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
     }
 }
 
@@ -308,6 +336,15 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
             // No useful information
         case bpcOpCompressUncompressed:
             c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+            if d.uploadedAnnotations != nil {
+                tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations)
+                if err != nil {
+                    return fmt.Errorf("parsing just-created compression annotations: %w", err)
+                }
+                if tocDigest != nil {
+                    c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest)
+                }
+            }
         case bpcOpDecompressCompressed:
             c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
         case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
@@ -323,29 +360,27 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
             return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
         }
     }
-    if d.srcCompressorName == "" || d.uploadedCompressorName == "" {
-        return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)",
-            d.srcCompressorName, d.uploadedCompressorName)
+    if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" {
+        return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)",
+            d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName)
     }
-    if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
-        if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
-            // HACK: Don’t record zstd:chunked algorithms.
-            // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
-            // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
-            //
-            // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
-            // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
-            // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
-            // inconsistent data to be logged.
-            c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
-        }
+    if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+        c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
+            BaseVariantCompressor:      d.uploadedCompressorBaseVariantName,
+            SpecificVariantCompressor:  d.uploadedCompressorSpecificVariantName,
+            SpecificVariantAnnotations: d.uploadedAnnotations,
+        })
     }
     if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
-        d.srcCompressorName != internalblobinfocache.UnknownCompression {
-        if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
-            // HACK: Don’t record zstd:chunked algorithms, see above.
-            c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
-        }
+        d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+        // If the source is already using some TOC-dependent variant, we either copied the
+        // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest,
+        // so record neither the variant name, nor the TOC digest.
+        c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
+            BaseVariantCompressor:      d.srcCompressorBaseVariantName,
+            SpecificVariantCompressor:  internalblobinfocache.UnknownCompression,
+            SpecificVariantAnnotations: nil,
+        })
     }
     return nil
 }
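
Taken together, the last two hunks replace the old RecordDigestCompressorName call with RecordDigestCompressorData, which carries the base variant, the specific variant, and the variant's annotations in one value. The sketch below summarizes the blob info cache surface this file relies on after the change, reconstructed only from the calls visible in the diff; the real declarations in internal/blobinfocache may differ in detail, and the go-digest import path is an assumption.

package blobinfocachesketch

import "github.com/opencontainers/go-digest"

// DigestCompressorData mirrors the fields populated by copy/compression.go in the hunks above.
type DigestCompressorData struct {
    BaseVariantCompressor      string            // a base name such as zstd or gzip, or Uncompressed/UnknownCompression
    SpecificVariantCompressor  string            // e.g. zstd:chunked, or UnknownCompression when there is no TOC-specific variant
    SpecificVariantAnnotations map[string]string // annotations needed to reuse the specific variant (the TOC digest, in particular)
}

// recorder lists the cache methods the rewritten code calls; it is a summary, not the actual interface.
type recorder interface {
    RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
    RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest)
    RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
}
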

Diff for: copy/encryption.go (+1 -1)
@@ -48,7 +48,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
             Annotations: stream.info.Annotations,
         }
         // DecryptLayer supposedly returns a digest of the decrypted stream.
-        // In pratice, that value is never set in the current implementation.
+        // In practice, that value is never set in the current implementation.
         // And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
         // i.e. it doesn’t authenticate the origin of the metadata in any way.
         reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)

Diff for: copy/multiple_test.go (+3 -3)
@@ -77,7 +77,7 @@ func TestPrepareCopyInstancesforInstanceCopyClone(t *testing.T) {
     // * Still copy gzip variants if they exist in the original
     // * Not create new Zstd variants if they exist in the original.
 
-    // We crated a list of three instances `sourceInstances` and since in oci1.index.zstd-selection.json
+    // We created a list of three instances `sourceInstances` and since in oci1.index.zstd-selection.json
     // amd64 already has a zstd instance i.e sourceInstance[1] so it should not create replication for
     // `sourceInstance[0]` and `sourceInstance[1]` but should do it for `sourceInstance[2]` for `arm64`
     // and still copy `sourceInstance[2]`.
@@ -93,8 +93,8 @@ func TestPrepareCopyInstancesforInstanceCopyClone(t *testing.T) {
     actualResponse := convertInstanceCopyToSimplerInstanceCopy(instancesToCopy)
     assert.Equal(t, expectedResponse, actualResponse)
 
-    // Test option with multiple copy request for same compression format
-    // above expection should stay same, if out ensureCompressionVariantsExist requests zstd twice
+    // Test option with multiple copy request for same compression format.
+    // The above expectation should stay the same, if ensureCompressionVariantsExist requests zstd twice.
     ensureCompressionVariantsExist = []OptionCompressionVariant{{Algorithm: compression.Zstd}, {Algorithm: compression.Zstd}}
     instancesToCopy, err = prepareInstanceCopies(list, sourceInstances, &Options{EnsureCompressionVariantsExist: ensureCompressionVariantsExist})
     require.NoError(t, err)
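
For context on what this test exercises: EnsureCompressionVariantsExist is a field of copy.Options (as passed to prepareInstanceCopies above) that asks the copy operation to make sure the named compression variant exists for every instance when copying a manifest list. A hedged sketch of how a caller might set it; the ImageListSelection/CopyAllImages names and the overall wiring are assumptions from my reading of the copy package, not shown in this diff.

package copyexample

import (
    "github.com/containers/image/v5/copy"
    "github.com/containers/image/v5/pkg/compression"
)

// zstdEverywhereOptions returns copy options requesting that a zstd variant exists
// for each copied instance of a manifest list (illustrative sketch only).
func zstdEverywhereOptions() *copy.Options {
    return &copy.Options{
        ImageListSelection:             copy.CopyAllImages,
        EnsureCompressionVariantsExist: []copy.OptionCompressionVariant{{Algorithm: compression.Zstd}},
    }
}
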

Diff for: copy/progress_bars.go (+1 -1)
@@ -121,7 +121,7 @@ func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
     }
 }
 
-// mark100PercentComplete marks the progres bars as 100% complete;
+// mark100PercentComplete marks the progress bars as 100% complete;
 // it may do so by possibly advancing the current state if it is below the known total.
 func (bar *progressBar) mark100PercentComplete() {
     if bar.originalSize > 0 {

Diff for: copy/sign_test.go (+1 -1)
@@ -116,7 +116,7 @@ func TestCreateSignatures(t *testing.T) {
             successfullySignedIdentity: "docker.io/library/busybox:latest",
         },
         {
-            name:                       "docker:// with overidden identity",
+            name:                       "docker:// with overridden identity",
             dest:                       dockerDest,
             identity:                   "myregistry.io/myrepo:mytag",
             successfullySignedIdentity: "myregistry.io/myrepo:mytag",
