
Commit 90c61df (committed Mar 19, 2018)

WIP - Build a mirroring plan and execute it

1 parent: f53168c

2 files changed: +367 −104 lines
pkg/oc/cli/cmd/image/mirror/mirror.go (+78 −104)
@@ -26,7 +26,6 @@ import (
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/rest"
 
-	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	apirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
@@ -297,86 +296,6 @@ func (o *pushOptions) Complete(args []string) error {
 	return nil
 }
 
-type key struct {
-	registry   string
-	repository string
-}
-
-type destination struct {
-	t    DestinationType
-	ref  imageapi.DockerImageReference
-	tags []string
-}
-
-type pushTargets map[key]destination
-
-type destinations struct {
-	ref     imageapi.DockerImageReference
-	tags    map[string]pushTargets
-	digests map[string]pushTargets
-}
-
-func (d destinations) mergeIntoDigests(srcDigest godigest.Digest, target pushTargets) {
-	srcKey := srcDigest.String()
-	current, ok := d.digests[srcKey]
-	if !ok {
-		d.digests[srcKey] = target
-		return
-	}
-	for repo, dst := range target {
-		existing, ok := current[repo]
-		if !ok {
-			current[repo] = dst
-			continue
-		}
-		existing.tags = append(existing.tags, dst.tags...)
-	}
-}
-
-type targetTree map[key]destinations
-
-func buildTargetTree(mappings []Mapping) targetTree {
-	tree := make(targetTree)
-	for _, m := range mappings {
-		srcKey := key{registry: m.Source.Registry, repository: m.Source.RepositoryName()}
-		dstKey := key{registry: m.Destination.Registry, repository: m.Destination.RepositoryName()}
-
-		src, ok := tree[srcKey]
-		if !ok {
-			src.ref = m.Source.AsRepository()
-			src.digests = make(map[string]pushTargets)
-			src.tags = make(map[string]pushTargets)
-			tree[srcKey] = src
-		}
-
-		var current pushTargets
-		if tag := m.Source.Tag; len(tag) != 0 {
-			current = src.tags[tag]
-			if current == nil {
-				current = make(pushTargets)
-				src.tags[tag] = current
-			}
-		} else {
-			current = src.digests[m.Source.ID]
-			if current == nil {
-				current = make(pushTargets)
-				src.digests[m.Source.ID] = current
-			}
-		}
-
-		dst, ok := current[dstKey]
-		if !ok {
-			dst.ref = m.Destination.AsRepository()
-			dst.t = m.Type
-		}
-		if len(m.Destination.Tag) > 0 {
-			dst.tags = append(dst.tags, m.Destination.Tag)
-		}
-		current[dstKey] = dst
-	}
-	return tree
-}
-
 type retrieverError struct {
 	src, dst imageapi.DockerImageReference
 	err      error
@@ -418,44 +337,52 @@ func (o *pushOptions) includeDescriptor(d *manifestlist.ManifestDescriptor) bool
 var ErrAlreadyExists = fmt.Errorf("blob already exists in the target location")
 
 func (o *pushOptions) Run() error {
+	p, err := o.plan()
+	if err != nil {
+		return err
+	}
+	p.Print(o.Out)
+
+	return nil
+}
+
+func (o *pushOptions) plan() (*plan, error) {
 	tree := buildTargetTree(o.Mappings)
 
 	creds := dockercredentials.NewLocal()
 	ctx := apirequest.NewContext()
 
 	rt, err := rest.TransportFor(&rest.Config{})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	srcClient := registryclient.NewContext(rt, insecureRT).WithCredentials(creds)
 	toContext := registryclient.NewContext(rt, insecureRT).WithActions("pull", "push")
 
-	var errs []error
+	plan := &plan{}
+
 	for _, src := range tree {
 		srcRepo, err := srcClient.Repository(ctx, src.ref.DockerClientDefaults().RegistryURL(), src.ref.RepositoryName(), o.Insecure)
 		if err != nil {
-			errs = append(errs, retrieverError{err: fmt.Errorf("unable to connect to %s: %v", src.ref, err), src: src.ref})
+			plan.AddError(retrieverError{err: fmt.Errorf("unable to connect to %s: %v", src.ref, err), src: src.ref})
 			continue
 		}
 
 		manifests, err := srcRepo.Manifests(ctx)
 		if err != nil {
-			errs = append(errs, retrieverError{src: src.ref, err: fmt.Errorf("unable to access source image %s manifests: %v", src.ref, err)})
+			plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to access source image %s manifests: %v", src.ref, err)})
 			continue
 		}
 
-		var tagErrs []retrieverError
-		var digestErrs []retrieverError
-
 		// convert source tags to digests
 		for srcTag, pushTargets := range src.tags {
 			desc, err := srcRepo.Tags(ctx).Get(ctx, srcTag)
 			if err != nil {
-				tagErrs = append(tagErrs, retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s by tag: %v", src.ref, err)})
+				plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s by tag: %v", src.ref, err)})
 				continue
 			}
 			srcDigest := desc.Digest
@@ -470,14 +397,14 @@ func (o *pushOptions) Run() error {
 			srcDigest := godigest.Digest(srcDigestString)
 			srcManifest, err := manifests.Get(ctx, godigest.Digest(srcDigest), schema2ManifestOnly)
 			if err != nil {
-				digestErrs = append(digestErrs, retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s manifest: %v", src.ref, err)})
+				plan.AddError(retrieverError{src: src.ref, err: fmt.Errorf("unable to retrieve source image %s manifest: %v", src.ref, err)})
 				continue
 			}
 
 			// filter or load manifest list as appropriate
 			srcManifests, srcManifest, srcDigest, err := processManifestList(ctx, srcDigest, srcManifest, manifests, src.ref, o.includeDescriptor)
 			if err != nil {
-				digestErrs = append(digestErrs, retrieverError{src: src.ref, err: err})
+				plan.AddError(retrieverError{src: src.ref, err: err})
 				continue
 			}
 			if len(srcManifests) == 0 {
@@ -493,14 +420,18 @@ func (o *pushOptions) Run() error {
 
 				toRepo, err := o.Repository(ctx, toContext, creds, dst.t, dst.ref)
 				if err != nil {
-					digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to connect to %s: %v", dst.ref, err)})
+					plan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to connect to %s: %v", dst.ref, err)})
 					continue
 				}
 
 				canonicalTo := toRepo.Named()
+
+				repoPlan := plan.RegistryPlan(dst.ref.Registry).RepositoryPlan(canonicalTo.String())
+				blobPlan := repoPlan.Blobs(src.ref.String())
+
 				toManifests, err := toRepo.Manifests(ctx)
 				if err != nil {
-					digestErrs = append(digestErrs, retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to access destination image %s manifests: %v", src.ref, err)})
+					repoPlan.AddError(retrieverError{src: src.ref, dst: dst.ref, err: fmt.Errorf("unable to access destination image %s manifests: %v", src.ref, err)})
 					continue
 				}
 
@@ -513,29 +444,24 @@ func (o *pushOptions) Run() error {
 				default:
 					if _, err := toManifests.Get(ctx, srcDigest); err != nil {
 						mustCopyLayers = true
+						blobPlan.AlreadyExists(distribution.Descriptor{Digest: srcDigest})
 					} else {
 						glog.V(4).Infof("Manifest exists in %s, no need to copy layers without --force", dst.ref)
 					}
 				}
 
 				if mustCopyLayers {
-					if errs := uploadBlobs(ctx, dst, srcRepo, toRepo, srcManifests, src.ref, srcDigest, canonicalFrom, o.Force, o.SkipMount, o.ErrOut); len(errs) > 0 {
-						digestErrs = append(digestErrs, errs...)
+					if errs := planBlobs(ctx, blobPlan, dst, srcRepo, toRepo, srcManifests, src.ref, o.Force); len(errs) > 0 {
+						repoPlan.AddError(errs...)
 						continue
 					}
 				}
 
-				if errs := uploadAndTagManifests(ctx, dst, srcManifest, src.ref, toManifests, o.Out, toRepo.Blobs(ctx), canonicalTo); len(errs) > 0 {
-					digestErrs = append(digestErrs, errs...)
-					continue
-				}
+				repoPlan.Manifests().Copy(srcDigest, srcManifest, dst.tags, toManifests)
 			}
 		}
-		for _, err := range append(tagErrs, digestErrs...) {
-			errs = append(errs, err)
-		}
 	}
-	return kerrors.NewAggregate(errs)
+	return plan, nil
 }
 
 func processManifestList(ctx apirequest.Context, srcDigest godigest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imageapi.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor) bool) ([]distribution.Manifest, distribution.Manifest, godigest.Digest, error) {
@@ -601,6 +527,54 @@ func processManifestList(ctx apirequest.Context, srcDigest godigest.Digest, srcM
 	}
 }
 
+func planBlobs(
+	ctx apirequest.Context,
+	plan *repositoryBlobCopy,
+	dst destination,
+	srcRepo, toRepo distribution.Repository,
+	srcManifests []distribution.Manifest,
+	srcRef imageapi.DockerImageReference,
+	force bool,
+) []error {
+
+	// upload all the blobs
+	toBlobs := toRepo.Blobs(ctx)
+	srcBlobs := srcRepo.Blobs(ctx)
+
+	var errs []error
+
+	// upload each manifest
+	for _, srcManifest := range srcManifests {
+		switch srcManifest.(type) {
+		case *schema2.DeserializedManifest:
+		case *manifestlist.DeserializedManifestList:
+			// we do not need to upload layers in a manifestlist
+			continue
+		default:
+			errs = append(errs, retrieverError{src: srcRef, dst: dst.ref, err: fmt.Errorf("the manifest type %T is not supported", srcManifest)})
+			continue
+		}
+
+		for _, blob := range srcManifest.References() {
+			// if we aren't forcing upload, skip the blob copy
+			if !force {
+				_, err := toBlobs.Stat(ctx, blob.Digest)
+				if err == nil {
+					// blob exists, skip
+					plan.AlreadyExists(blob)
+					glog.V(5).Infof("Server reports blob exists %#v", blob)
+					continue
+				}
+				if err != distribution.ErrBlobUnknown {
+					glog.V(5).Infof("Server was unable to check whether blob exists %s: %v", blob.Digest, err)
+				}
+			}
+			plan.Copy(blob, srcBlobs, toBlobs)
+		}
+	}
+	return errs
+}
+
 func uploadBlobs(
 	ctx apirequest.Context,
 	dst destination,
pkg/oc/cli/cmd/image/mirror/plan.go (+289 −0)
@@ -0,0 +1,289 @@
+package mirror
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/docker/distribution"
+	units "github.com/docker/go-units"
+	godigest "github.com/opencontainers/go-digest"
+	imageapi "github.com/openshift/origin/pkg/image/apis/image"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+type key struct {
+	registry   string
+	repository string
+}
+
+type destination struct {
+	t    DestinationType
+	ref  imageapi.DockerImageReference
+	tags []string
+}
+
+type pushTargets map[key]destination
+
+type destinations struct {
+	ref     imageapi.DockerImageReference
+	tags    map[string]pushTargets
+	digests map[string]pushTargets
+}
+
+func (d destinations) mergeIntoDigests(srcDigest godigest.Digest, target pushTargets) {
+	srcKey := srcDigest.String()
+	current, ok := d.digests[srcKey]
+	if !ok {
+		d.digests[srcKey] = target
+		return
+	}
+	for repo, dst := range target {
+		existing, ok := current[repo]
+		if !ok {
+			current[repo] = dst
+			continue
+		}
+		existing.tags = append(existing.tags, dst.tags...)
+	}
+}
+
+type targetTree map[key]destinations
+
+func buildTargetTree(mappings []Mapping) targetTree {
+	tree := make(targetTree)
+	for _, m := range mappings {
+		srcKey := key{registry: m.Source.Registry, repository: m.Source.RepositoryName()}
+		dstKey := key{registry: m.Destination.Registry, repository: m.Destination.RepositoryName()}
+
+		src, ok := tree[srcKey]
+		if !ok {
+			src.ref = m.Source.AsRepository()
+			src.digests = make(map[string]pushTargets)
+			src.tags = make(map[string]pushTargets)
+			tree[srcKey] = src
+		}
+
+		var current pushTargets
+		if tag := m.Source.Tag; len(tag) != 0 {
+			current = src.tags[tag]
+			if current == nil {
+				current = make(pushTargets)
+				src.tags[tag] = current
+			}
+		} else {
+			current = src.digests[m.Source.ID]
+			if current == nil {
+				current = make(pushTargets)
+				src.digests[m.Source.ID] = current
+			}
+		}
+
+		dst, ok := current[dstKey]
+		if !ok {
+			dst.ref = m.Destination.AsRepository()
+			dst.t = m.Type
+		}
+		if len(m.Destination.Tag) > 0 {
+			dst.tags = append(dst.tags, m.Destination.Tag)
+		}
+		current[dstKey] = dst
+	}
+	return tree
+}
+
+type plan struct {
+	registries map[string]*registryPlan
+	errs       []error
+	blobs      map[godigest.Digest]distribution.Descriptor
+}
+
+func (p *plan) AddError(errs ...error) {
+	p.errs = append(p.errs, errs...)
+}
+
+func (p *plan) RegistryPlan(name string) *registryPlan {
+	if p.registries == nil {
+		p.registries = make(map[string]*registryPlan)
+	}
+	plan, ok := p.registries[name]
+	if ok {
+		return plan
+	}
+	plan = &registryPlan{
+		parent: p,
+	}
+	p.registries[name] = plan
+	return plan
+}
+
+func (p *plan) cache(blob distribution.Descriptor) {
+	if p.blobs == nil {
+		p.blobs = make(map[godigest.Digest]distribution.Descriptor)
+	}
+	if existing, ok := p.blobs[blob.Digest]; ok && existing.Size > 0 {
+		return
+	}
+	p.blobs[blob.Digest] = blob
+}
+
+func (p *plan) RegistryNames() sets.String {
+	names := sets.NewString()
+	for name, _ := range p.registries {
+		names.Insert(name)
+	}
+	return names
+}
+
+func (p *plan) Print(w io.Writer) {
+	for _, err := range p.errs {
+		fmt.Fprintf(w, "error: %s\n", err)
+	}
+	names := p.RegistryNames()
+	for _, name := range names.List() {
+		r := p.registries[name]
+		for repoName, repo := range r.repositories {
+			fmt.Fprintf(w, "target: %s/%s\n", name, repoName)
+			for _, err := range repo.errs {
+				fmt.Fprintf(w, "\terror: %s\n", err)
+			}
+			for _, blob := range repo.blobCopies {
+				fmt.Fprintf(w, "\tblobs:\n")
+				for _, s := range blob.blobs.List() {
+					if size := p.blobs[godigest.Digest(s)].Size; size > 0 {
+						fmt.Fprintf(w, "\t\t%s %s %s\n", blob.name, s, units.BytesSize(float64(size)))
+					} else {
+						fmt.Fprintf(w, "\t\t%s %s\n", blob.name, s)
+					}
+				}
+			}
+			fmt.Fprintf(w, "\tmanifests:\n")
+			for _, s := range repo.manifests.digestCopies {
+				fmt.Fprintf(w, "\t\t%s\n", s)
+			}
+			for _, digest := range repo.manifests.inputDigests().List() {
+				tags := repo.manifests.digestsToTags[godigest.Digest(digest)]
+				for _, s := range tags.List() {
+					fmt.Fprintf(w, "\t\t%s -> %s\n", digest, s)
+				}
+			}
+		}
+	}
+}
+
+type registryPlan struct {
+	parent       *plan
+	repositories map[string]*repositoryPlan
+}
+
+func (p *registryPlan) RepositoryPlan(name string) *repositoryPlan {
+	if p.repositories == nil {
+		p.repositories = make(map[string]*repositoryPlan)
+	}
+	plan, ok := p.repositories[name]
+	if ok {
+		return plan
+	}
+	plan = &repositoryPlan{
+		parent:        p,
+		existingBlobs: sets.NewString(),
+	}
+	p.repositories[name] = plan
+	return plan
+}
+
+type repositoryPlan struct {
+	parent *registryPlan
+
+	existingBlobs sets.String
+
+	blobCopies []repositoryBlobCopy
+	manifests  *repositoryManifestPlan
+
+	errs []error
+}
+
+func (p *repositoryPlan) AddError(errs ...error) {
+	p.errs = append(p.errs, errs...)
+}
+
+func (p *repositoryPlan) Blobs(source string) *repositoryBlobCopy {
+	for i := range p.blobCopies {
+		if p.blobCopies[i].name == source {
+			return &p.blobCopies[i]
+		}
+	}
+	p.blobCopies = append(p.blobCopies, repositoryBlobCopy{
+		parent: p,
+		name:   source,
+		blobs:  sets.NewString(),
+	})
+	return &p.blobCopies[len(p.blobCopies)-1]
+}
+
+func (p *repositoryPlan) Manifests() *repositoryManifestPlan {
+	if p.manifests == nil {
+		p.manifests = &repositoryManifestPlan{
+			parent:        p,
+			digestsToTags: make(map[godigest.Digest]sets.String),
+			digestCopies:  sets.NewString(),
+		}
+	}
+	return p.manifests
+}
+
+type repositoryBlobCopy struct {
+	parent *repositoryPlan
+	name   string
+	from   distribution.BlobService
+	to     distribution.BlobService
+	blobs  sets.String
+}
+
+func (p *repositoryBlobCopy) AlreadyExists(blob distribution.Descriptor) {
+	p.parent.parent.parent.cache(blob)
+	p.parent.existingBlobs.Insert(blob.Digest.String())
+	p.blobs.Delete(blob.Digest.String())
+}
+
+func (p *repositoryBlobCopy) Copy(blob distribution.Descriptor, from, to distribution.BlobService) {
+	if p.from == nil {
+		p.from = from
+	}
+	if p.to == nil {
+		p.to = to
+	}
+	p.parent.parent.parent.cache(blob)
+	p.blobs.Insert(blob.Digest.String())
+}
+
+type repositoryManifestPlan struct {
+	parent        *repositoryPlan
+	from          map[godigest.Digest]distribution.Manifest
+	to            distribution.ManifestService
+	digestsToTags map[godigest.Digest]sets.String
+	digestCopies  sets.String
+}
+
+func (p *repositoryManifestPlan) Copy(srcDigest godigest.Digest, srcManifest distribution.Manifest, tags []string, to distribution.ManifestService) {
+	if p.to == nil {
+		p.to = to
+	}
+	if len(tags) == 0 {
+		p.digestCopies.Insert(srcDigest.String())
+		return
+	}
+	allTags := p.digestsToTags[srcDigest]
+	if allTags == nil {
+		allTags = sets.NewString()
+		p.digestsToTags[srcDigest] = allTags
+	}
+	allTags.Insert(tags...)
+}
+
+func (p *repositoryManifestPlan) inputDigests() sets.String {
+	names := sets.NewString()
+	for digest := range p.digestsToTags {
+		names.Insert(digest.String())
+	}
+	return names
+}
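
For reference, the snippet below is a hand-written sketch (not part of this commit) of how the new plan types compose once the change is applied. It assumes it lives in package mirror so it can reach the unexported types; the registry name, repository, digests, and size are invented for illustration, and real callers would pass the source and destination BlobService/ManifestService handles obtained from the registry clients.

package mirror

import (
	"os"

	"github.com/docker/distribution"
	godigest "github.com/opencontainers/go-digest"
)

// examplePlan sketches the plan API with made-up values.
func examplePlan() {
	p := &plan{}

	// Work is grouped by destination registry, then repository, mirroring how
	// pushOptions.plan() walks each mapping.
	repoPlan := p.RegistryPlan("registry.example.com").RepositoryPlan("myteam/app")

	// Record a blob that still needs to be copied from the source repository.
	blobPlan := repoPlan.Blobs("docker.io/library/busybox")
	blobPlan.Copy(distribution.Descriptor{
		Digest: godigest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111"),
		Size:   2048,
	}, nil, nil) // real callers pass the source and destination BlobServices

	// Record a manifest to tag in the destination repository.
	repoPlan.Manifests().Copy(
		godigest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222"),
		nil, []string{"latest"}, nil)

	// Run() now prints the accumulated plan rather than executing it.
	p.Print(os.Stdout)
}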
