
Commit b91c635

WIP - Build a mirroring plan and execute it
1 parent f53168c commit b91c635

3 files changed (+626, -120 lines)


pkg/image/registryclient/client.go

+11
@@ -68,6 +68,17 @@ type Context struct {
 	redirect map[url.URL]*url.URL
 }
 
+func (c *Context) Copy() *Context {
+	copied := *c
+	copied.authFn = nil
+	copied.pings = make(map[url.URL]error)
+	copied.redirect = make(map[url.URL]*url.URL)
+	for k, v := range c.redirect {
+		copied.redirect[k] = v
+	}
+	return &copied
+}
+
 func (c *Context) WithScopes(scopes ...auth.Scope) *Context {
 	c.authFn = nil
 	c.Scopes = scopes
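
The Copy above is what lets the mirror command below narrow auth scopes per destination without mutating a Context shared across destinations (see the toContext.Copy().WithScopes call in mirror.go). A minimal usage sketch, not part of the commit; it assumes the registryclient package from this repository and docker/distribution's registry auth client package, and the repository name is illustrative:

// scopedPullContext derives a context that may additionally pull from srcRepo
// (e.g. to allow cross-repository blob mounts) without touching the shared base.
// Copy clears authFn and gives the copy fresh pings/redirect maps.
func scopedPullContext(base *registryclient.Context, srcRepo string) *registryclient.Context {
	return base.Copy().WithScopes(auth.RepositoryScope{
		Repository: srcRepo, // illustrative, e.g. "myorg/myimage"
		Actions:    []string{"pull"},
	})
}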

pkg/oc/cli/cmd/image/mirror/mirror.go

+65 -120
@@ -8,6 +8,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/manifest/manifestlist"
@@ -26,7 +27,6 @@ import (
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/rest"
 
-	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	apirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
@@ -297,95 +297,6 @@ func (o *pushOptions) Complete(args []string) error {
 	return nil
 }
 
-type key struct {
-	registry   string
-	repository string
-}
-
-type destination struct {
-	t    DestinationType
-	ref  imageapi.DockerImageReference
-	tags []string
-}
-
-type pushTargets map[key]destination
-
-type destinations struct {
-	ref     imageapi.DockerImageReference
-	tags    map[string]pushTargets
-	digests map[string]pushTargets
-}
-
-func (d destinations) mergeIntoDigests(srcDigest godigest.Digest, target pushTargets) {
-	srcKey := srcDigest.String()
-	current, ok := d.digests[srcKey]
-	if !ok {
-		d.digests[srcKey] = target
-		return
-	}
-	for repo, dst := range target {
-		existing, ok := current[repo]
-		if !ok {
-			current[repo] = dst
-			continue
-		}
-		existing.tags = append(existing.tags, dst.tags...)
-	}
-}
-
-type targetTree map[key]destinations
-
-func buildTargetTree(mappings []Mapping) targetTree {
-	tree := make(targetTree)
-	for _, m := range mappings {
-		srcKey := key{registry: m.Source.Registry, repository: m.Source.RepositoryName()}
-		dstKey := key{registry: m.Destination.Registry, repository: m.Destination.RepositoryName()}
-
-		src, ok := tree[srcKey]
-		if !ok {
-			src.ref = m.Source.AsRepository()
-			src.digests = make(map[string]pushTargets)
-			src.tags = make(map[string]pushTargets)
-			tree[srcKey] = src
-		}
-
-		var current pushTargets
-		if tag := m.Source.Tag; len(tag) != 0 {
-			current = src.tags[tag]
-			if current == nil {
-				current = make(pushTargets)
-				src.tags[tag] = current
-			}
-		} else {
-			current = src.digests[m.Source.ID]
-			if current == nil {
-				current = make(pushTargets)
-				src.digests[m.Source.ID] = current
-			}
-		}
-
-		dst, ok := current[dstKey]
-		if !ok {
-			dst.ref = m.Destination.AsRepository()
-			dst.t = m.Type
-		}
-		if len(m.Destination.Tag) > 0 {
-			dst.tags = append(dst.tags, m.Destination.Tag)
-		}
-		current[dstKey] = dst
-	}
-	return tree
-}
-
-type retrieverError struct {
-	src, dst imageapi.DockerImageReference
-	err      error
-}
-
-func (e retrieverError) Error() string {
-	return e.err.Error()
-}
-
 func (o *pushOptions) Repository(ctx apirequest.Context, context *registryclient.Context, creds auth.CredentialStore, t DestinationType, ref imageapi.DockerImageReference) (distribution.Repository, error) {
 	switch t {
 	case DestinationRegistry:
@@ -414,48 +325,64 @@ func (o *pushOptions) includeDescriptor(d *manifestlist.ManifestDescriptor) bool
 	return o.OSFilter.MatchString(fmt.Sprintf("%s/%s", d.Platform.OS, d.Platform.Architecture))
 }
 
-// ErrAlreadyExists may be returned by the blob Create function to indicate that the blob already exists.
-var ErrAlreadyExists = fmt.Errorf("blob already exists in the target location")
-
 func (o *pushOptions) Run() error {
+	start := time.Now()
+	p, err := o.plan()
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(o.ErrOut, "info: Planning complete in %s\n", time.Now().Sub(start).Round(time.Millisecond))
+	p.Print(o.Out)
+
+	work := Greedy(p)
+	for i, phase := range work.phases {
+		fmt.Fprintf(o.ErrOut, "phase %d:\n", i)
+		for _, unit := range phase.independent {
+			fmt.Fprintf(o.ErrOut, " %s %s\n", unit.registry.name, unit.repository.name)
+			fmt.Fprintf(o.ErrOut, " blobs=%d mounts=%d manifests=%d\n", unit.repository.stats.sharedCount+unit.repository.stats.uniqueCount, unit.stats.mountOpportunities, unit.repository.manifests.stats.count)
+		}
+	}
+
+	return nil
+}
+
+func (o *pushOptions) plan() (*plan, error) {
 	tree := buildTargetTree(o.Mappings)
 
 	creds := dockercredentials.NewLocal()
 	ctx := apirequest.NewContext()
 
 	rt, err := rest.TransportFor(&rest.Config{})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}})
 	if err != nil {
-		return err
+		return nil, err
 	}
-	srcClient := registryclient.NewContext(rt, insecureRT).WithCredentials(creds)
+	fromContext := registryclient.NewContext(rt, insecureRT).WithCredentials(creds)
 	toContext := registryclient.NewContext(rt, insecureRT).WithActions("pull", "push")
 
-	var errs []error
+	plan := &plan{}
+
 	for _, src := range tree {
-		srcRepo, err := srcClient.Repository(ctx, src.ref.DockerClientDefaults().RegistryURL(), src.ref.RepositoryName(), o.Insecure)
+		srcRepo, err := fromContext.Repository(ctx, src.ref.DockerClientDefaults().RegistryURL(), src.ref.RepositoryName(), o.Insecure)
 		if err != nil {
-			errs = append(errs, retrieverError{err: fmt.Errorf("unable to connect to %s: %v", src.ref, err), src: src.ref})
+			plan.AddError(retrieverError{err: fmt.Errorf("unable to connect to %s: %v", src.ref, err), src: src.ref})
 			continue
 		}
 
 		manifests, err := srcRepo.Manifests(ctx)
 		if err != nil {
-			errs = append(errs, retrieverError{src: src.ref, err: fmt.Errorf("unable to access source image %s manifests: %v", src.ref, err)})
+			plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to access source image %s manifests: %v", src.ref, err)})
 			continue
 		}
 
-		var tagErrs []retrieverError
-		var digestErrs []retrieverError
-
 		// convert source tags to digests
 		for srcTag, pushTargets := range src.tags {
 			desc, err := srcRepo.Tags(ctx).Get(ctx, srcTag)
 			if err != nil {
-				tagErrs = append(tagErrs, retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s by tag: %v", src.ref, err)})
+				plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s by tag: %v", src.ref, err)})
 				continue
 			}
 			srcDigest := desc.Digest
@@ -470,14 +397,14 @@ func (o *pushOptions) Run() error {
 			srcDigest := godigest.Digest(srcDigestString)
 			srcManifest, err := manifests.Get(ctx, godigest.Digest(srcDigest), schema2ManifestOnly)
 			if err != nil {
-				digestErrs = append(digestErrs, retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s manifest: %v", src.ref, err)})
+				plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s manifest: %v", src.ref, err)})
 				continue
 			}
 
 			// filter or load manifest list as appropriate
 			srcManifests, srcManifest, srcDigest, err := processManifestList(ctx, srcDigest, srcManifest, manifests, src.ref, o.includeDescriptor)
 			if err != nil {
-				digestErrs = append(digestErrs, retrieverError{src: src.ref, err: err})
+				plan.AddError(retrieverError{src: src.ref, err: err})
 				continue
 			}
 			if len(srcManifests) == 0 {
@@ -488,19 +415,23 @@ func (o *pushOptions) Run() error {
 			for _, dst := range pushTargets {
 				// if we are going to be using cross repository mount, get a token that covers the src
 				if src.ref.Registry == dst.ref.Registry {
-					toContext = toContext.WithScopes(auth.RepositoryScope{Repository: src.ref.RepositoryName(), Actions: []string{"pull"}})
+					toContext = toContext.Copy().WithScopes(auth.RepositoryScope{Repository: src.ref.RepositoryName(), Actions: []string{"pull"}})
 				}
 
 				toRepo, err := o.Repository(ctx, toContext, creds, dst.t, dst.ref)
 				if err != nil {
-					digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to connect to %s: %v", dst.ref, err)})
+					plan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to connect to %s: %v", dst.ref, err)})
 					continue
 				}
 
 				canonicalTo := toRepo.Named()
+
+				repoPlan := plan.RegistryPlan(dst.ref.Registry).RepositoryPlan(canonicalTo.String())
+				blobPlan := repoPlan.Blobs(src.ref.AsRepository().String())
+
 				toManifests, err := toRepo.Manifests(ctx)
 				if err != nil {
-					digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to access destination image %s manifests: %v", src.ref, err)})
+					repoPlan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to access destination image %s manifests: %v", src.ref, err)})
 					continue
 				}
 
@@ -513,29 +444,43 @@ func (o *pushOptions) Run() error {
 				default:
 					if _, err := toManifests.Get(ctx, srcDigest); err != nil {
 						mustCopyLayers = true
+						blobPlan.AlreadyExists(distribution.Descriptor{Digest: srcDigest})
 					} else {
 						glog.V(4).Infof("Manifest exists in %s, no need to copy layers without --force", dst.ref)
 					}
 				}
 
 				if mustCopyLayers {
-					if errs := uploadBlobs(ctx, dst, srcRepo, toRepo, srcManifests, src.ref, srcDigest, canonicalFrom, o.Force, o.SkipMount, o.ErrOut); len(errs) > 0 {
-						digestErrs = append(digestErrs, errs...)
-						continue
+					// upload all the blobs
+					toBlobs := toRepo.Blobs(ctx)
+					srcBlobs := srcRepo.Blobs(ctx)
+
+					// upload each manifest
+					for _, srcManifest := range srcManifests {
+						switch srcManifest.(type) {
+						case *schema2.DeserializedManifest:
+						case *manifestlist.DeserializedManifestList:
+							// we do not need to upload layers in a manifestlist
+							continue
+						default:
+							repoPlan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("the manifest type %T is not supported", srcManifest)})
+							continue
+						}
+						for _, blob := range srcManifest.References() {
+							// glog.V(5).Infof("copying blob %s to %s", blob.Digest, dst.ref)
+							blobPlan.Copy(blob, srcBlobs, toBlobs)
+						}
 					}
 				}
 
-				if errs := uploadAndTagManifests(ctx, dst, srcManifest, src.ref, toManifests, o.Out, toRepo.Blobs(ctx), canonicalTo); len(errs) > 0 {
-					digestErrs = append(digestErrs, errs...)
-					continue
-				}
+				repoPlan.Manifests().Copy(srcDigest, srcManifest, dst.tags, toManifests)
 			}
 		}
-		for _, err := range append(tagErrs, digestErrs...) {
-			errs = append(errs, err)
-		}
 	}
-	return kerrors.NewAggregate(errs)
+
+	plan.calculateStats()
+
+	return plan, nil
 }
 
 func processManifestList(ctx apirequest.Context, srcDigest godigest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imageapi.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor) bool) ([]distribution.Manifest, distribution.Manifest, godigest.Digest, error) {
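
With this change Run does not upload anything yet: plan() walks the target tree and records the blob copies, mount opportunities, manifest copies, and errors into a plan, and Greedy partitions that plan into phases of independent registry/repository units. The plan and Greedy types live in the third changed file of this commit, which is not shown here. Going by the Fprintf format strings in Run above, the summary written to stderr for a single mapping would look roughly like the following (registry, repository, timing, and counts are illustrative):

info: Planning complete in 148ms
phase 0:
 quay.io myorg/myimage
 blobs=12 mounts=3 manifests=1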
