From 3dd13d9d1bbc721965d6fca36c3b17f779798399 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sun, 24 Jun 2018 23:41:10 -0400 Subject: [PATCH 1/7] Allow the -delete-namespace flag to work on origin tests Regressed when we started using our own logic. --- test/extended/util/cli.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/extended/util/cli.go b/test/extended/util/cli.go index 2f35772460d3..3f5aa7074d84 100644 --- a/test/extended/util/cli.go +++ b/test/extended/util/cli.go @@ -191,7 +191,7 @@ func (c *CLI) TeardownProject() { if len(c.configPath) > 0 { os.Remove(c.configPath) } - if len(c.namespacesToDelete) > 0 { + if e2e.TestContext.DeleteNamespace && len(c.namespacesToDelete) > 0 { timeout := e2e.DefaultNamespaceDeletionTimeout if c.kubeFramework.NamespaceDeletionTimeout != 0 { timeout = c.kubeFramework.NamespaceDeletionTimeout From fb2ae7f12b828a6a4ff6fb5a970de04c5e2e7d3e Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sat, 5 Aug 2017 22:19:16 -0400 Subject: [PATCH 2/7] Track OS information for images, prune status tags Unify location of some base layer constants --- pkg/image/apis/image/dockertypes.go | 1 + pkg/image/dockerlayer/dockerlayer.go | 19 +++++++++++++++++++ pkg/image/registry/imagestream/strategy.go | 11 +++++++++++ pkg/image/registry/imagestreamtag/rest.go | 3 +++ pkg/oc/admin/top/graph.go | 8 ++------ pkg/oc/admin/top/images_test.go | 5 +++-- 6 files changed, 39 insertions(+), 8 deletions(-) create mode 100644 pkg/image/dockerlayer/dockerlayer.go diff --git a/pkg/image/apis/image/dockertypes.go b/pkg/image/apis/image/dockertypes.go index 1db3914cdbe4..af1adba79ddd 100644 --- a/pkg/image/apis/image/dockertypes.go +++ b/pkg/image/apis/image/dockertypes.go @@ -139,6 +139,7 @@ type DockerImageConfig struct { Size int64 `json:"size,omitempty"` RootFS *DockerConfigRootFS `json:"rootfs,omitempty"` History []DockerConfigHistory `json:"history,omitempty"` + OS string `json:"os,omitempty"` OSVersion string 
`json:"os.version,omitempty"` OSFeatures []string `json:"os.features,omitempty"` } diff --git a/pkg/image/dockerlayer/dockerlayer.go b/pkg/image/dockerlayer/dockerlayer.go new file mode 100644 index 000000000000..9762968e9972 --- /dev/null +++ b/pkg/image/dockerlayer/dockerlayer.go @@ -0,0 +1,19 @@ +package dockerlayer + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +const ( + // GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer + GzippedEmptyLayerDigest = "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + // EmptyLayerDiffID is the tarsum of the GzippedEmptyLayer + EmptyLayerDiffID = "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + // DigestSha256EmptyTar is the canonical sha256 digest of empty data + DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) diff --git a/pkg/image/registry/imagestream/strategy.go b/pkg/image/registry/imagestream/strategy.go index 82a198ac0de1..5f185ffb9da8 100644 --- a/pkg/image/registry/imagestream/strategy.go +++ b/pkg/image/registry/imagestream/strategy.go @@ -67,6 +67,15 @@ func (s Strategy) NamespaceScoped() bool { return true } +// collapseEmptyStatusTags removes status tags that are completely empty. 
+func collapseEmptyStatusTags(stream *imageapi.ImageStream) { + for tag, ref := range stream.Status.Tags { + if len(ref.Items) == 0 && len(ref.Conditions) == 0 { + delete(stream.Status.Tags, tag) + } + } +} + // PrepareForCreate clears fields that are not allowed to be set by end users on creation. func (s Strategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { stream := obj.(*imageapi.ImageStream) @@ -79,6 +88,7 @@ func (s Strategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { ref.Generation = &stream.Generation stream.Spec.Tags[tag] = ref } + collapseEmptyStatusTags(stream) } // Validate validates a new image stream and verifies the current user is @@ -511,6 +521,7 @@ func (s Strategy) prepareForUpdate(obj, old runtime.Object, resetStatus bool) { oldStream := old.(*imageapi.ImageStream) stream := obj.(*imageapi.ImageStream) + collapseEmptyStatusTags(stream) stream.Generation = oldStream.Generation if resetStatus { stream.Status = oldStream.Status diff --git a/pkg/image/registry/imagestreamtag/rest.go b/pkg/image/registry/imagestreamtag/rest.go index de9a3eb82c3f..c18857024711 100644 --- a/pkg/image/registry/imagestreamtag/rest.go +++ b/pkg/image/registry/imagestreamtag/rest.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/golang/glog" + kapierrors "k8s.io/apimachinery/pkg/api/errors" metainternal "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -389,6 +391,7 @@ func newISTag(tag string, imageStream *imageapi.ImageStream, image *imageapi.Ima event := imageapi.LatestTaggedImage(imageStream, tag) if event == nil || len(event.Image) == 0 { if !allowEmptyEvent { + glog.V(4).Infof("did not find tag %s in image stream status tags: %#v", tag, imageStream.Status.Tags) return nil, kapierrors.NewNotFound(imageapi.Resource("imagestreamtags"), istagName) } event = &imageapi.TagEvent{ diff --git a/pkg/oc/admin/top/graph.go b/pkg/oc/admin/top/graph.go index 8948e9412770..624f4c163c2a 
100644 --- a/pkg/oc/admin/top/graph.go +++ b/pkg/oc/admin/top/graph.go @@ -7,6 +7,7 @@ import ( kapi "k8s.io/kubernetes/pkg/apis/core" imageapi "github.com/openshift/origin/pkg/image/apis/image" + "github.com/openshift/origin/pkg/image/dockerlayer" "github.com/openshift/origin/pkg/oc/graph/genericgraph" imagegraph "github.com/openshift/origin/pkg/oc/graph/imagegraph/nodes" kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes" @@ -19,11 +20,6 @@ const ( HistoricImageStreamImageEdgeKind = "HistoricImageStreamImage" PodImageEdgeKind = "PodImage" ParentImageEdgeKind = "ParentImage" - - // digestSha256EmptyTar is the canonical sha256 digest of empty data - digestSHA256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - // digest.DigestSha256EmptyTar is empty layer digest, whereas this is gzipped digest of empty layer - digestSHA256GzippedEmptyTar = "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" ) func getImageNodes(nodes []gonum.Node) []*imagegraph.ImageNode { @@ -53,7 +49,7 @@ func addImagesToGraph(g genericgraph.Graph, images *imageapi.ImageList) { layer := image.DockerImageLayers[i] layerNode := imagegraph.EnsureImageComponentLayerNode(g, layer.Name) edgeKind := ImageLayerEdgeKind - if !topLayerAdded && layer.Name != digestSHA256EmptyTar && layer.Name != digestSHA256GzippedEmptyTar { + if !topLayerAdded && layer.Name != dockerlayer.DigestSha256EmptyTar && layer.Name != dockerlayer.GzippedEmptyLayerDigest { edgeKind = ImageTopLayerEdgeKind topLayerAdded = true } diff --git a/pkg/oc/admin/top/images_test.go b/pkg/oc/admin/top/images_test.go index a2e10bc22ac5..a0fbadae709a 100644 --- a/pkg/oc/admin/top/images_test.go +++ b/pkg/oc/admin/top/images_test.go @@ -9,6 +9,7 @@ import ( appsapi "github.com/openshift/origin/pkg/apps/apis/apps" buildapi "github.com/openshift/origin/pkg/build/apis/build" imageapi "github.com/openshift/origin/pkg/image/apis/image" + 
"github.com/openshift/origin/pkg/image/dockerlayer" ) func TestImagesTop(t *testing.T) { @@ -298,7 +299,7 @@ func TestImagesTop(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "image2"}, DockerImageLayers: []imageapi.ImageLayer{ {Name: "layer1"}, - {Name: digestSHA256EmptyTar}, + {Name: dockerlayer.DigestSha256EmptyTar}, {Name: "layer2"}, }, DockerImageManifest: "non empty metadata", @@ -336,7 +337,7 @@ func TestImagesTop(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "image2"}, DockerImageLayers: []imageapi.ImageLayer{ {Name: "layer1"}, - {Name: digestSHA256GzippedEmptyTar}, + {Name: dockerlayer.GzippedEmptyLayerDigest}, {Name: "layer2"}, }, DockerImageManifest: "non empty metadata", From 54636b8b93a435b165de23fa4efee9a43fef5d35 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sun, 17 Jun 2018 16:59:52 -0400 Subject: [PATCH 3/7] Split fundamental docker types --- hack/import-restrictions.json | 9 +- pkg/image/apis/image/docker10/doc.go | 3 +- pkg/image/apis/image/docker10/register.go | 23 -- pkg/image/apis/image/docker10/types.go | 156 +++++++++ pkg/image/apis/image/dockertypes.go | 305 +++++++++-------- pkg/image/apis/image/zz_generated.deepcopy.go | 317 ------------------ pkg/image/importer/dockerv1client/client.go | 5 +- pkg/image/util/helpers.go | 37 +- 8 files changed, 350 insertions(+), 505 deletions(-) delete mode 100644 pkg/image/apis/image/docker10/register.go create mode 100644 pkg/image/apis/image/docker10/types.go diff --git a/hack/import-restrictions.json b/hack/import-restrictions.json index 56c9b0c2a640..ddec39aae35e 100644 --- a/hack/import-restrictions.json +++ b/hack/import-restrictions.json @@ -148,10 +148,17 @@ ] }, + { + "checkedPackages": [ + "github.com/openshift/origin/pkg/image/apis/image/docker10" + ], + "allowedImportPackageRoots": [], + "allowedImportPackages": [] + }, + { "checkedPackages": [ "github.com/openshift/origin/pkg/image/apis/image", - "github.com/openshift/origin/pkg/image/apis/image/docker10", 
"github.com/openshift/origin/pkg/image/apis/image/dockerpre012", "github.com/openshift/origin/pkg/image/apis/image/v1" ], diff --git a/pkg/image/apis/image/docker10/doc.go b/pkg/image/apis/image/docker10/doc.go index b608555764a7..ec75ae477b7f 100644 --- a/pkg/image/apis/image/docker10/doc.go +++ b/pkg/image/apis/image/docker10/doc.go @@ -1,2 +1,3 @@ -// Package docker10 is the docker10 version of the API. +// Package docker10 provides types used by docker/distribution and moby/moby. +// This package takes no dependency on external types. package docker10 diff --git a/pkg/image/apis/image/docker10/register.go b/pkg/image/apis/image/docker10/register.go deleted file mode 100644 index 8704f9499410..000000000000 --- a/pkg/image/apis/image/docker10/register.go +++ /dev/null @@ -1,23 +0,0 @@ -package docker10 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - GroupName = "image.openshift.io" - LegacyGroupName = "" -) - -// SchemeGroupVersion is group version used to register these objects -var ( - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"} - LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"} - - SchemeBuilder = runtime.NewSchemeBuilder() - LegacySchemeBuilder = runtime.NewSchemeBuilder() - - AddToScheme = SchemeBuilder.AddToScheme - AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme -) diff --git a/pkg/image/apis/image/docker10/types.go b/pkg/image/apis/image/docker10/types.go new file mode 100644 index 000000000000..4f30520ad849 --- /dev/null +++ b/pkg/image/apis/image/docker10/types.go @@ -0,0 +1,156 @@ +package docker10 + +import ( + "time" +) + +// DockerImage is the type representing a docker image and its various properties when +// retrieved from the Docker client API. 
+type DockerImage struct { + ID string `json:"Id"` + Parent string `json:"Parent,omitempty"` + Comment string `json:"Comment,omitempty"` + Created time.Time `json:"Created,omitempty"` + Container string `json:"Container,omitempty"` + ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty"` + Config *DockerConfig `json:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. +type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` +} + +// 
Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against against this digest. + Digest string `json:"digest,omitempty"` +} + +// DockerImageManifest represents the Docker v2 image format. +type DockerImageManifest struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType,omitempty"` + + // schema1 + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []DockerFSLayer `json:"fsLayers"` + History []DockerHistory `json:"history"` + + // schema2 + Layers []Descriptor `json:"layers"` + Config Descriptor `json:"config"` +} + +// DockerFSLayer is a container struct for BlobSums defined in an image manifest +type DockerFSLayer struct { + // DockerBlobSum is the tarsum of the referenced filesystem image layer + // TODO make this digest.Digest once docker/distribution is in Godeps + DockerBlobSum string `json:"blobSum"` +} + +// DockerHistory stores unstructured v1 compatibility information +type DockerHistory struct { + // DockerV1Compatibility is the raw v1 compatibility information + DockerV1Compatibility string `json:"v1Compatibility"` +} + +// DockerV1CompatibilityImage represents the structured v1 +// compatibility information. 
+type DockerV1CompatibilityImage struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// DockerV1CompatibilityImageSize represents the structured v1 +// compatibility information for size +type DockerV1CompatibilityImageSize struct { + Size int64 `json:"size,omitempty"` +} + +// DockerImageConfig stores the image configuration +type DockerImageConfig struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` + RootFS *DockerConfigRootFS `json:"rootfs,omitempty"` + History []DockerConfigHistory `json:"history,omitempty"` + OS string `json:"os,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// DockerConfigHistory stores build commands that were used to create an image +type DockerConfigHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// DockerConfigRootFS describes images root filesystem +type 
DockerConfigRootFS struct { + Type string `json:"type"` + DiffIDs []string `json:"diff_ids,omitempty"` +} diff --git a/pkg/image/apis/image/dockertypes.go b/pkg/image/apis/image/dockertypes.go index af1adba79ddd..bc5e10ff6d6f 100644 --- a/pkg/image/apis/image/dockertypes.go +++ b/pkg/image/apis/image/dockertypes.go @@ -2,159 +2,190 @@ package image import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/api/image/docker10" + public "github.com/openshift/origin/pkg/image/apis/image/docker10" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DockerImage is the type representing a docker image and its various properties when // retrieved from the Docker client API. -type DockerImage struct { - metav1.TypeMeta `json:",inline"` - - ID string `json:"Id"` - Parent string `json:"Parent,omitempty"` - Comment string `json:"Comment,omitempty"` - Created metav1.Time `json:"Created,omitempty"` - Container string `json:"Container,omitempty"` - ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"` - DockerVersion string `json:"DockerVersion,omitempty"` - Author string `json:"Author,omitempty"` - Config *DockerConfig `json:"Config,omitempty"` - Architecture string `json:"Architecture,omitempty"` - Size int64 `json:"Size,omitempty"` -} +type DockerImage = docker10.DockerImage // DockerConfig is the list of configuration options used when creating a container. 
-type DockerConfig struct { - Hostname string `json:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty"` - User string `json:"User,omitempty"` - Memory int64 `json:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty"` - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - Tty bool `json:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty"` - Env []string `json:"Env,omitempty"` - Cmd []string `json:"Cmd,omitempty"` - DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty"` - VolumesFrom string `json:"VolumesFrom,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - Entrypoint []string `json:"Entrypoint,omitempty"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty"` - SecurityOpts []string `json:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty"` - Labels map[string]string `json:"Labels,omitempty"` -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. 
- Digest string `json:"digest,omitempty"` -} - -// DockerImageManifest represents the Docker v2 image format. -type DockerImageManifest struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType,omitempty"` - - // schema1 - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []DockerFSLayer `json:"fsLayers"` - History []DockerHistory `json:"history"` - - // schema2 - Layers []Descriptor `json:"layers"` - Config Descriptor `json:"config"` -} - -// DockerFSLayer is a container struct for BlobSums defined in an image manifest -type DockerFSLayer struct { - // DockerBlobSum is the tarsum of the referenced filesystem image layer - // TODO make this digest.Digest once docker/distribution is in Godeps - DockerBlobSum string `json:"blobSum"` -} - -// DockerHistory stores unstructured v1 compatibility information -type DockerHistory struct { - // DockerV1Compatibility is the raw v1 compatibility information - DockerV1Compatibility string `json:"v1Compatibility"` +type DockerConfig = docker10.DockerConfig + +// Convert_public_to_api_DockerImage ensures that out has all of the fields set from in or returns +// an error. +func Convert_public_to_api_DockerImage(in *public.DockerImage, out *docker10.DockerImage) error { + *out = docker10.DockerImage{ + ID: in.ID, + Parent: in.Parent, + Comment: in.Comment, + Created: metav1.Time{Time: in.Created}, + Container: in.Container, + DockerVersion: in.DockerVersion, + Author: in.Author, + Architecture: in.Architecture, + Size: in.Size, + } + if err := Convert_public_to_api_DockerConfig(&in.ContainerConfig, &out.ContainerConfig); err != nil { + return err + } + if in.Config != nil { + out.Config = &docker10.DockerConfig{} + if err := Convert_public_to_api_DockerConfig(in.Config, out.Config); err != nil { + return err + } + } + return nil } -// DockerV1CompatibilityImage represents the structured v1 -// compatibility information. 
-type DockerV1CompatibilityImage struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created metav1.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig DockerConfig `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *DockerConfig `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` +// Convert_imageconfig_to_api_DockerImage takes a Docker registry digest (schema 2.1) and converts it +// to the external API version of Image. +func Convert_compatibility_to_api_DockerImage(in *public.DockerV1CompatibilityImage, out *docker10.DockerImage) error { + *out = docker10.DockerImage{ + ID: in.ID, + Parent: in.Parent, + Comment: in.Comment, + Created: metav1.Time{Time: in.Created}, + Container: in.Container, + DockerVersion: in.DockerVersion, + Author: in.Author, + Architecture: in.Architecture, + Size: in.Size, + } + if err := Convert_public_to_api_DockerConfig(&in.ContainerConfig, &out.ContainerConfig); err != nil { + return err + } + if in.Config != nil { + out.Config = &docker10.DockerConfig{} + if err := Convert_public_to_api_DockerConfig(in.Config, out.Config); err != nil { + return err + } + } + return nil } -// DockerV1CompatibilityImageSize represents the structured v1 -// compatibility information for size -type DockerV1CompatibilityImageSize struct { - Size int64 `json:"size,omitempty"` +// Convert_imageconfig_to_api_DockerImage takes a Docker registry digest (schema 2.2) and converts it +// to the external API version of Image. 
+func Convert_imageconfig_to_api_DockerImage(in *public.DockerImageConfig, out *docker10.DockerImage) error { + *out = docker10.DockerImage{ + ID: in.ID, + Parent: in.Parent, + Comment: in.Comment, + Created: metav1.Time{Time: in.Created}, + Container: in.Container, + DockerVersion: in.DockerVersion, + Author: in.Author, + Architecture: in.Architecture, + Size: in.Size, + } + if err := Convert_public_to_api_DockerConfig(&in.ContainerConfig, &out.ContainerConfig); err != nil { + return err + } + if in.Config != nil { + out.Config = &docker10.DockerConfig{} + if err := Convert_public_to_api_DockerConfig(in.Config, out.Config); err != nil { + return err + } + } + return nil } -// DockerImageConfig stores the image configuration -type DockerImageConfig struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created metav1.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig DockerConfig `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *DockerConfig `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` - RootFS *DockerConfigRootFS `json:"rootfs,omitempty"` - History []DockerConfigHistory `json:"history,omitempty"` - OS string `json:"os,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` +// Convert_api_to_public_DockerImage ensures that out has all of the fields set from in or returns +// an error. 
+func Convert_api_to_public_DockerImage(in *docker10.DockerImage, out *public.DockerImage) error { + *out = public.DockerImage{ + ID: in.ID, + Parent: in.Parent, + Comment: in.Comment, + Created: in.Created.Time, + Container: in.Container, + DockerVersion: in.DockerVersion, + Author: in.Author, + Architecture: in.Architecture, + Size: in.Size, + } + if err := Convert_api_to_public_DockerConfig(&in.ContainerConfig, &out.ContainerConfig); err != nil { + return err + } + if in.Config != nil { + out.Config = &public.DockerConfig{} + if err := Convert_api_to_public_DockerConfig(in.Config, out.Config); err != nil { + return err + } + } + return nil } -// DockerConfigHistory stores build commands that were used to create an image -type DockerConfigHistory struct { - Created metav1.Time `json:"created"` - Author string `json:"author,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer bool `json:"empty_layer,omitempty"` +// Convert_public_to_api_DockerConfig ensures that out has all of the fields set from in or returns +// an error. 
+func Convert_public_to_api_DockerConfig(in *public.DockerConfig, out *docker10.DockerConfig) error { + *out = docker10.DockerConfig{ + Hostname: in.Hostname, + Domainname: in.Domainname, + User: in.User, + Memory: in.Memory, + MemorySwap: in.MemorySwap, + CPUShares: in.CPUShares, + CPUSet: in.CPUSet, + AttachStdin: in.AttachStdin, + AttachStdout: in.AttachStdout, + AttachStderr: in.AttachStderr, + PortSpecs: in.PortSpecs, + ExposedPorts: in.ExposedPorts, + Tty: in.Tty, + OpenStdin: in.OpenStdin, + StdinOnce: in.StdinOnce, + Env: in.Env, + Cmd: in.Cmd, + DNS: in.DNS, + Image: in.Image, + Volumes: in.Volumes, + VolumesFrom: in.VolumesFrom, + WorkingDir: in.WorkingDir, + Entrypoint: in.Entrypoint, + NetworkDisabled: in.NetworkDisabled, + SecurityOpts: in.SecurityOpts, + OnBuild: in.OnBuild, + Labels: in.Labels, + } + return nil } -// DockerConfigRootFS describes images root filesystem -type DockerConfigRootFS struct { - Type string `json:"type"` - DiffIDs []string `json:"diff_ids,omitempty"` +// Convert_api_to_public_DockerConfig ensures that out has all of the fields set from in or returns +// an error. 
+func Convert_api_to_public_DockerConfig(in *docker10.DockerConfig, out *public.DockerConfig) error { + *out = public.DockerConfig{ + Hostname: in.Hostname, + Domainname: in.Domainname, + User: in.User, + Memory: in.Memory, + MemorySwap: in.MemorySwap, + CPUShares: in.CPUShares, + CPUSet: in.CPUSet, + AttachStdin: in.AttachStdin, + AttachStdout: in.AttachStdout, + AttachStderr: in.AttachStderr, + PortSpecs: in.PortSpecs, + ExposedPorts: in.ExposedPorts, + Tty: in.Tty, + OpenStdin: in.OpenStdin, + StdinOnce: in.StdinOnce, + Env: in.Env, + Cmd: in.Cmd, + DNS: in.DNS, + Image: in.Image, + Volumes: in.Volumes, + VolumesFrom: in.VolumesFrom, + WorkingDir: in.WorkingDir, + Entrypoint: in.Entrypoint, + NetworkDisabled: in.NetworkDisabled, + SecurityOpts: in.SecurityOpts, + OnBuild: in.OnBuild, + Labels: in.Labels, + } + return nil } diff --git a/pkg/image/apis/image/zz_generated.deepcopy.go b/pkg/image/apis/image/zz_generated.deepcopy.go index 0f6a00114cdc..1fcf28298d9a 100644 --- a/pkg/image/apis/image/zz_generated.deepcopy.go +++ b/pkg/image/apis/image/zz_generated.deepcopy.go @@ -9,323 +9,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Descriptor) DeepCopyInto(out *Descriptor) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. -func (in *Descriptor) DeepCopy() *Descriptor { - if in == nil { - return nil - } - out := new(Descriptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { - *out = *in - if in.PortSpecs != nil { - in, out := &in.PortSpecs, &out.PortSpecs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExposedPorts != nil { - in, out := &in.ExposedPorts, &out.ExposedPorts - *out = make(map[string]struct{}, len(*in)) - for key := range *in { - (*out)[key] = struct{}{} - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Cmd != nil { - in, out := &in.Cmd, &out.Cmd - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make(map[string]struct{}, len(*in)) - for key := range *in { - (*out)[key] = struct{}{} - } - } - if in.Entrypoint != nil { - in, out := &in.Entrypoint, &out.Entrypoint - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecurityOpts != nil { - in, out := &in.SecurityOpts, &out.SecurityOpts - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.OnBuild != nil { - in, out := &in.OnBuild, &out.OnBuild - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. -func (in *DockerConfig) DeepCopy() *DockerConfig { - if in == nil { - return nil - } - out := new(DockerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DockerConfigHistory) DeepCopyInto(out *DockerConfigHistory) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfigHistory. -func (in *DockerConfigHistory) DeepCopy() *DockerConfigHistory { - if in == nil { - return nil - } - out := new(DockerConfigHistory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerConfigRootFS) DeepCopyInto(out *DockerConfigRootFS) { - *out = *in - if in.DiffIDs != nil { - in, out := &in.DiffIDs, &out.DiffIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfigRootFS. -func (in *DockerConfigRootFS) DeepCopy() *DockerConfigRootFS { - if in == nil { - return nil - } - out := new(DockerConfigRootFS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerFSLayer) DeepCopyInto(out *DockerFSLayer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerFSLayer. -func (in *DockerFSLayer) DeepCopy() *DockerFSLayer { - if in == nil { - return nil - } - out := new(DockerFSLayer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerHistory) DeepCopyInto(out *DockerHistory) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerHistory. 
-func (in *DockerHistory) DeepCopy() *DockerHistory { - if in == nil { - return nil - } - out := new(DockerHistory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerImage) DeepCopyInto(out *DockerImage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.Created.DeepCopyInto(&out.Created) - in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) - if in.Config != nil { - in, out := &in.Config, &out.Config - if *in == nil { - *out = nil - } else { - *out = new(DockerConfig) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. -func (in *DockerImage) DeepCopy() *DockerImage { - if in == nil { - return nil - } - out := new(DockerImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DockerImage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DockerImageConfig) DeepCopyInto(out *DockerImageConfig) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) - if in.Config != nil { - in, out := &in.Config, &out.Config - if *in == nil { - *out = nil - } else { - *out = new(DockerConfig) - (*in).DeepCopyInto(*out) - } - } - if in.RootFS != nil { - in, out := &in.RootFS, &out.RootFS - if *in == nil { - *out = nil - } else { - *out = new(DockerConfigRootFS) - (*in).DeepCopyInto(*out) - } - } - if in.History != nil { - in, out := &in.History, &out.History - *out = make([]DockerConfigHistory, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.OSFeatures != nil { - in, out := &in.OSFeatures, &out.OSFeatures - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageConfig. -func (in *DockerImageConfig) DeepCopy() *DockerImageConfig { - if in == nil { - return nil - } - out := new(DockerImageConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerImageManifest) DeepCopyInto(out *DockerImageManifest) { - *out = *in - if in.FSLayers != nil { - in, out := &in.FSLayers, &out.FSLayers - *out = make([]DockerFSLayer, len(*in)) - copy(*out, *in) - } - if in.History != nil { - in, out := &in.History, &out.History - *out = make([]DockerHistory, len(*in)) - copy(*out, *in) - } - if in.Layers != nil { - in, out := &in.Layers, &out.Layers - *out = make([]Descriptor, len(*in)) - copy(*out, *in) - } - out.Config = in.Config - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageManifest. 
-func (in *DockerImageManifest) DeepCopy() *DockerImageManifest { - if in == nil { - return nil - } - out := new(DockerImageManifest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerV1CompatibilityImage) DeepCopyInto(out *DockerV1CompatibilityImage) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) - if in.Config != nil { - in, out := &in.Config, &out.Config - if *in == nil { - *out = nil - } else { - *out = new(DockerConfig) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerV1CompatibilityImage. -func (in *DockerV1CompatibilityImage) DeepCopy() *DockerV1CompatibilityImage { - if in == nil { - return nil - } - out := new(DockerV1CompatibilityImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerV1CompatibilityImageSize) DeepCopyInto(out *DockerV1CompatibilityImageSize) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerV1CompatibilityImageSize. -func (in *DockerV1CompatibilityImageSize) DeepCopy() *DockerV1CompatibilityImageSize { - if in == nil { - return nil - } - out := new(DockerV1CompatibilityImageSize) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Image) DeepCopyInto(out *Image) { *out = *in diff --git a/pkg/image/importer/dockerv1client/client.go b/pkg/image/importer/dockerv1client/client.go index 035b0cef964e..f26e7ef2ca5e 100644 --- a/pkg/image/importer/dockerv1client/client.go +++ b/pkg/image/importer/dockerv1client/client.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" imageapi "github.com/openshift/origin/pkg/image/apis/image" + "github.com/openshift/origin/pkg/image/apis/image/docker10" "github.com/openshift/origin/pkg/image/apis/image/reference" ) @@ -161,7 +162,7 @@ func normalizeDockerHubHost(host string, v2 bool) string { func normalizeRegistryName(name string) (*url.URL, error) { prefix := name if len(prefix) == 0 { - prefix = imageapi.DockerDefaultV1Registry + prefix = reference.DockerDefaultV1Registry } hadPrefix := false switch { @@ -747,7 +748,7 @@ func (repo *v2repository) getImageConfig(c *connection, dgst string) ([]byte, er } func (repo *v2repository) unmarshalImageManifest(c *connection, body []byte) (*docker.Image, error) { - manifest := imageapi.DockerImageManifest{} + manifest := docker10.DockerImageManifest{} if err := json.Unmarshal(body, &manifest); err != nil { return nil, err } diff --git a/pkg/image/util/helpers.go b/pkg/image/util/helpers.go index b8b1984aedd3..ebe0e030f3cf 100644 --- a/pkg/image/util/helpers.go +++ b/pkg/image/util/helpers.go @@ -12,9 +12,10 @@ import ( godigest "github.com/opencontainers/go-digest" imageapi "github.com/openshift/origin/pkg/image/apis/image" + "github.com/openshift/origin/pkg/image/apis/image/docker10" ) -func fillImageLayers(image *imageapi.Image, manifest imageapi.DockerImageManifest) error { +func fillImageLayers(image *imageapi.Image, manifest docker10.DockerImageManifest) error { if len(image.DockerImageLayers) != 0 { // DockerImageLayers is already filled by the registry. 
return nil @@ -30,7 +31,7 @@ func fillImageLayers(image *imageapi.Image, manifest imageapi.DockerImageManifes for i, obj := range manifest.History { layer := manifest.FSLayers[i] - var size imageapi.DockerV1CompatibilityImageSize + var size docker10.DockerV1CompatibilityImageSize if err := json.Unmarshal([]byte(obj.DockerV1Compatibility), &size); err != nil { size.Size = 0 } @@ -80,7 +81,7 @@ func ImageWithMetadata(image *imageapi.Image) error { return nil } - manifest := imageapi.DockerImageManifest{} + manifest := docker10.DockerImageManifest{} if err := json.Unmarshal([]byte(image.DockerImageManifest), &manifest); err != nil { return err } @@ -99,21 +100,14 @@ func ImageWithMetadata(image *imageapi.Image) error { return fmt.Errorf("the image %s (%s) has a schema 1 manifest, but it doesn't have history", image.Name, image.DockerImageReference) } - v1Metadata := imageapi.DockerV1CompatibilityImage{} + v1Metadata := docker10.DockerV1CompatibilityImage{} if err := json.Unmarshal([]byte(manifest.History[0].DockerV1Compatibility), &v1Metadata); err != nil { return err } - image.DockerImageMetadata.ID = v1Metadata.ID - image.DockerImageMetadata.Parent = v1Metadata.Parent - image.DockerImageMetadata.Comment = v1Metadata.Comment - image.DockerImageMetadata.Created = v1Metadata.Created - image.DockerImageMetadata.Container = v1Metadata.Container - image.DockerImageMetadata.ContainerConfig = v1Metadata.ContainerConfig - image.DockerImageMetadata.DockerVersion = v1Metadata.DockerVersion - image.DockerImageMetadata.Author = v1Metadata.Author - image.DockerImageMetadata.Config = v1Metadata.Config - image.DockerImageMetadata.Architecture = v1Metadata.Architecture + if err := imageapi.Convert_compatibility_to_api_DockerImage(&v1Metadata, &image.DockerImageMetadata); err != nil { + return err + } case 2: image.DockerImageManifestMediaType = schema2.MediaTypeManifest @@ -121,21 +115,16 @@ func ImageWithMetadata(image *imageapi.Image) error { return fmt.Errorf("dockerImageConfig 
must not be empty for manifest schema 2") } - config := imageapi.DockerImageConfig{} + config := docker10.DockerImageConfig{} if err := json.Unmarshal([]byte(image.DockerImageConfig), &config); err != nil { return fmt.Errorf("failed to parse dockerImageConfig: %v", err) } + if err := imageapi.Convert_imageconfig_to_api_DockerImage(&config, &image.DockerImageMetadata); err != nil { + return err + } image.DockerImageMetadata.ID = manifest.Config.Digest - image.DockerImageMetadata.Parent = config.Parent - image.DockerImageMetadata.Comment = config.Comment - image.DockerImageMetadata.Created = config.Created - image.DockerImageMetadata.Container = config.Container - image.DockerImageMetadata.ContainerConfig = config.ContainerConfig - image.DockerImageMetadata.DockerVersion = config.DockerVersion - image.DockerImageMetadata.Author = config.Author - image.DockerImageMetadata.Config = config.Config - image.DockerImageMetadata.Architecture = config.Architecture + default: return fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, image.Name, image.DockerImageReference) } From da471b8551b96d456346beb7961bfa52bd74c397 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sun, 17 Jun 2018 23:57:19 -0400 Subject: [PATCH 4/7] Split private mirror functions for reuse These files will be copied for now and then refactored later into reusable packages. 
--- pkg/oc/cli/cmd/image/mirror/manifest.go | 178 +++++++++++++++++++++++ pkg/oc/cli/cmd/image/mirror/mappings.go | 82 +---------- pkg/oc/cli/cmd/image/mirror/mirror.go | 164 +-------------------- pkg/oc/cli/cmd/image/mirror/workqueue.go | 131 +++++++++++++++++ 4 files changed, 312 insertions(+), 243 deletions(-) create mode 100644 pkg/oc/cli/cmd/image/mirror/manifest.go create mode 100644 pkg/oc/cli/cmd/image/mirror/workqueue.go diff --git a/pkg/oc/cli/cmd/image/mirror/manifest.go b/pkg/oc/cli/cmd/image/mirror/manifest.go new file mode 100644 index 000000000000..ac775c73f583 --- /dev/null +++ b/pkg/oc/cli/cmd/image/mirror/manifest.go @@ -0,0 +1,178 @@ +package mirror + +import ( + "context" + "fmt" + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + + "github.com/docker/libtrust" + "github.com/golang/glog" + digest "github.com/opencontainers/go-digest" + + imageapi "github.com/openshift/origin/pkg/image/apis/image" +) + +func processManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imageapi.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor, bool) bool) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) { + var srcManifests []distribution.Manifest + switch t := srcManifest.(type) { + case *manifestlist.DeserializedManifestList: + manifestDigest := srcDigest + manifestList := t + + filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests)) + for _, manifest := range t.Manifests { + if !filterFn(&manifest, len(t.Manifests) > 1) { + glog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref) + continue + } + 
glog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref) + filtered = append(filtered, manifest) + } + + if len(filtered) == 0 { + return nil, nil, "", nil + } + + // if we're filtering the manifest list, update the source manifest and digest + if len(filtered) != len(t.Manifests) { + var err error + t, err = manifestlist.FromDescriptors(filtered) + if err != nil { + return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err) + } + _, body, err := t.Payload() + if err != nil { + return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err) + } + manifestList = t + manifestDigest = srcDigest.Algorithm().FromBytes(body) + glog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body) + } + + for i, manifest := range t.Manifests { + childManifest, err := manifests.Get(ctx, manifest.Digest, distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest})) + if err != nil { + return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err) + } + srcManifests = append(srcManifests, childManifest) + } + + switch { + case len(srcManifests) == 1: + _, body, err := srcManifests[0].Payload() + if err != nil { + return nil, nil, "", fmt.Errorf("unable to convert source image %s manifest list to single manifest: %v", ref, err) + } + manifestDigest := srcDigest.Algorithm().FromBytes(body) + glog.V(5).Infof("Used only one manifest from the list %s", manifestDigest) + return srcManifests, srcManifests[0], manifestDigest, nil + default: + return append(srcManifests, manifestList), manifestList, manifestDigest, nil + } + + default: + return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil + } +} + +// TODO: remove when quay.io switches to v2 schema +func putManifestInCompatibleSchema( + ctx context.Context, + srcManifest distribution.Manifest, + 
tag string, + toManifests distribution.ManifestService, + // supports schema2 -> schema1 downconversion + blobs distribution.BlobService, + ref reference.Named, +) (digest.Digest, error) { + var options []distribution.ManifestServiceOption + if len(tag) > 0 { + glog.V(5).Infof("Put manifest %s:%s", ref, tag) + options = []distribution.ManifestServiceOption{distribution.WithTag(tag)} + } else { + glog.V(5).Infof("Put manifest %s", ref) + } + toDigest, err := toManifests.Put(ctx, srcManifest, options...) + if err == nil { + return toDigest, nil + } + errs, ok := err.(errcode.Errors) + if !ok || len(errs) == 0 { + return toDigest, err + } + errcode, ok := errs[0].(errcode.Error) + if !ok || errcode.ErrorCode() != v2.ErrorCodeManifestInvalid { + return toDigest, err + } + // try downconverting to v2-schema1 + schema2Manifest, ok := srcManifest.(*schema2.DeserializedManifest) + if !ok { + return toDigest, err + } + tagRef, tagErr := reference.WithTag(ref, tag) + if tagErr != nil { + return toDigest, err + } + glog.V(5).Infof("Registry reported invalid manifest error, attempting to convert to v2schema1 as ref %s", tagRef) + schema1Manifest, convertErr := convertToSchema1(ctx, blobs, schema2Manifest, tagRef) + if convertErr != nil { + return toDigest, err + } + if glog.V(6) { + _, data, _ := schema1Manifest.Payload() + glog.Infof("Converted to v2schema1\n%s", string(data)) + } + return toManifests.Put(ctx, schema1Manifest, distribution.WithTag(tag)) +} + +// TODO: remove when quay.io switches to v2 schema +func convertToSchema1(ctx context.Context, blobs distribution.BlobService, schema2Manifest *schema2.DeserializedManifest, ref reference.Named) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + configJSON, err := blobs.Get(ctx, targetDescriptor.Digest) + if err != nil { + return nil, err + } + trustKey, err := loadPrivateKey() + if err != nil { + return nil, err + } + builder := schema1.NewConfigManifestBuilder(blobs, trustKey, ref, 
configJSON) + for _, d := range schema2Manifest.Layers { + if err := builder.AppendReference(d); err != nil { + return nil, err + } + } + manifest, err := builder.Build(ctx) + if err != nil { + return nil, err + } + return manifest, nil +} + +var ( + privateKeyLock sync.Mutex + privateKey libtrust.PrivateKey +) + +// TODO: remove when quay.io switches to v2 schema +func loadPrivateKey() (libtrust.PrivateKey, error) { + privateKeyLock.Lock() + defer privateKeyLock.Unlock() + if privateKey != nil { + return privateKey, nil + } + trustKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + privateKey = trustKey + return privateKey, nil +} diff --git a/pkg/oc/cli/cmd/image/mirror/mappings.go b/pkg/oc/cli/cmd/image/mirror/mappings.go index 558d68bd70d5..026150b70f66 100644 --- a/pkg/oc/cli/cmd/image/mirror/mappings.go +++ b/pkg/oc/cli/cmd/image/mirror/mappings.go @@ -7,11 +7,8 @@ import ( "strings" "sync" - "github.com/golang/glog" - "github.com/docker/distribution/registry/client/auth" - - godigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" imageapi "github.com/openshift/origin/pkg/image/apis/image" ) @@ -170,7 +167,7 @@ type destinations struct { digests map[string]pushTargets } -func (d *destinations) mergeIntoDigests(srcDigest godigest.Digest, target pushTargets) { +func (d *destinations) mergeIntoDigests(srcDigest digest.Digest, target pushTargets) { d.lock.Lock() defer d.lock.Unlock() srcKey := srcDigest.String() @@ -278,78 +275,3 @@ func calculateDockerRegistryScopes(tree targetTree) map[string][]auth.Scope { } return uniqueScopes } - -type workQueue struct { - ch chan workUnit - wg *sync.WaitGroup - } - -func newWorkQueue(workers int, stopCh <-chan struct{}) *workQueue { - q := &workQueue{ - ch: make(chan workUnit, 100), - wg: &sync.WaitGroup{}, - } - go q.run(workers, stopCh) - return q -} - -func (q *workQueue) run(workers int, stopCh <-chan struct{}) { - for i := 0; i < workers; i++ { 
- go func(i int) { - defer glog.V(4).Infof("worker %d stopping", i) - for { - select { - case work, ok := <-q.ch: - if !ok { - return - } - work.fn() - work.wg.Done() - case <-stopCh: - return - } - } - }(i) - } - <-stopCh -} - -func (q *workQueue) Batch(fn func(Work)) { - w := &worker{ - wg: &sync.WaitGroup{}, - ch: q.ch, - } - fn(w) - w.wg.Wait() -} - -func (q *workQueue) Queue(fn func(Work)) { - w := &worker{ - wg: q.wg, - ch: q.ch, - } - fn(w) -} - -func (q *workQueue) Done() { - q.wg.Wait() -} - -type workUnit struct { - fn func() - wg *sync.WaitGroup -} - -type Work interface { - Parallel(fn func()) -} - -type worker struct { - wg *sync.WaitGroup - ch chan workUnit -} - -func (w *worker) Parallel(fn func()) { - w.wg.Add(1) - w.ch <- workUnit{wg: w.wg, fn: fn} -} diff --git a/pkg/oc/cli/cmd/image/mirror/mirror.go b/pkg/oc/cli/cmd/image/mirror/mirror.go index e2e15ab78ec4..1282e3eb8e58 100644 --- a/pkg/oc/cli/cmd/image/mirror/mirror.go +++ b/pkg/oc/cli/cmd/image/mirror/mirror.go @@ -5,20 +5,15 @@ import ( "fmt" "io" "regexp" - "sync" "time" "github.com/docker/distribution" "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client" units "github.com/docker/go-units" - "github.com/docker/libtrust" "github.com/golang/glog" godigest "github.com/opencontainers/go-digest" "github.com/spf13/cobra" @@ -193,7 +188,7 @@ func (o *pushOptions) Repository(ctx context.Context, context *registryclient.Co } // includeDescriptor returns true if the provided manifest should be included. 
-func (o *pushOptions) includeDescriptor(d *manifestlist.ManifestDescriptor) bool { +func (o *pushOptions) includeDescriptor(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool { if o.OSFilter == nil { return true } @@ -466,69 +461,6 @@ func (o *pushOptions) plan() (*plan, error) { return plan, nil } -func processManifestList(ctx context.Context, srcDigest godigest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imageapi.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor) bool) ([]distribution.Manifest, distribution.Manifest, godigest.Digest, error) { - var srcManifests []distribution.Manifest - switch t := srcManifest.(type) { - case *manifestlist.DeserializedManifestList: - manifestDigest := srcDigest - manifestList := t - - filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests)) - for _, manifest := range t.Manifests { - if !filterFn(&manifest) { - glog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref) - continue - } - glog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref) - filtered = append(filtered, manifest) - } - - if len(filtered) == 0 { - return nil, nil, "", nil - } - - // if we're filtering the manifest list, update the source manifest and digest - if len(filtered) != len(t.Manifests) { - var err error - t, err = manifestlist.FromDescriptors(filtered) - if err != nil { - return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err) - } - _, body, err := t.Payload() - if err != nil { - return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err) - } - manifestList = t - manifestDigest = srcDigest.Algorithm().FromBytes(body) - glog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body) - } - - for i, manifest := range t.Manifests { - childManifest, err := manifests.Get(ctx, manifest.Digest, 
distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest})) - if err != nil { - return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err) - } - srcManifests = append(srcManifests, childManifest) - } - - switch { - case len(srcManifests) == 1: - _, body, err := srcManifests[0].Payload() - if err != nil { - return nil, nil, "", fmt.Errorf("unable to convert source image %s manifest list to single manifest: %v", ref, err) - } - manifestDigest := srcDigest.Algorithm().FromBytes(body) - glog.V(5).Infof("Used only one manifest from the list %s", manifestDigest) - return srcManifests, srcManifests[0], manifestDigest, nil - default: - return append(srcManifests, manifestList), manifestList, manifestDigest, nil - } - - default: - return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil - } -} - func copyBlob(ctx context.Context, plan *workPlan, c *repositoryBlobCopy, blob distribution.Descriptor, force, skipMount bool, errOut io.Writer) error { // if we aren't forcing upload, check to see if the blob aleady exists if !force { @@ -703,100 +635,6 @@ func copyManifests( return errs } -// TDOO: remove when quay.io switches to v2 schema -func putManifestInCompatibleSchema( - ctx context.Context, - srcManifest distribution.Manifest, - tag string, - toManifests distribution.ManifestService, - // supports schema2 -> schema1 downconversion - blobs distribution.BlobService, - ref reference.Named, -) (godigest.Digest, error) { - var options []distribution.ManifestServiceOption - if len(tag) > 0 { - glog.V(5).Infof("Put manifest %s:%s", ref, tag) - options = []distribution.ManifestServiceOption{distribution.WithTag(tag)} - } else { - glog.V(5).Infof("Put manifest %s", ref) - } - toDigest, err := toManifests.Put(ctx, srcManifest, options...) 
- if err == nil { - return toDigest, nil - } - errs, ok := err.(errcode.Errors) - if !ok || len(errs) == 0 { - return toDigest, err - } - errcode, ok := errs[0].(errcode.Error) - if !ok || errcode.ErrorCode() != v2.ErrorCodeManifestInvalid { - return toDigest, err - } - // try downconverting to v2-schema1 - schema2Manifest, ok := srcManifest.(*schema2.DeserializedManifest) - if !ok { - return toDigest, err - } - tagRef, tagErr := reference.WithTag(ref, tag) - if tagErr != nil { - return toDigest, err - } - glog.V(5).Infof("Registry reported invalid manifest error, attempting to convert to v2schema1 as ref %s", tagRef) - schema1Manifest, convertErr := convertToSchema1(ctx, blobs, schema2Manifest, tagRef) - if convertErr != nil { - return toDigest, err - } - if glog.V(6) { - _, data, _ := schema1Manifest.Payload() - glog.Infof("Converted to v2schema1\n%s", string(data)) - } - return toManifests.Put(ctx, schema1Manifest, distribution.WithTag(tag)) -} - -// TDOO: remove when quay.io switches to v2 schema -func convertToSchema1(ctx context.Context, blobs distribution.BlobService, schema2Manifest *schema2.DeserializedManifest, ref reference.Named) (distribution.Manifest, error) { - targetDescriptor := schema2Manifest.Target() - configJSON, err := blobs.Get(ctx, targetDescriptor.Digest) - if err != nil { - return nil, err - } - trustKey, err := loadPrivateKey() - if err != nil { - return nil, err - } - builder := schema1.NewConfigManifestBuilder(blobs, trustKey, ref, configJSON) - for _, d := range schema2Manifest.Layers { - if err := builder.AppendReference(d); err != nil { - return nil, err - } - } - manifest, err := builder.Build(ctx) - if err != nil { - return nil, err - } - return manifest, nil -} - -var ( - privateKeyLock sync.Mutex - privateKey libtrust.PrivateKey -) - -// TDOO: remove when quay.io switches to v2 schema -func loadPrivateKey() (libtrust.PrivateKey, error) { - privateKeyLock.Lock() - defer privateKeyLock.Unlock() - if privateKey != nil { - return 
privateKey, nil - } - trustKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err - } - privateKey = trustKey - return privateKey, nil -} - type optionFunc func(interface{}) error func (f optionFunc) Apply(v interface{}) error { diff --git a/pkg/oc/cli/cmd/image/mirror/workqueue.go b/pkg/oc/cli/cmd/image/mirror/workqueue.go new file mode 100644 index 000000000000..6587bff17816 --- /dev/null +++ b/pkg/oc/cli/cmd/image/mirror/workqueue.go @@ -0,0 +1,131 @@ +package mirror + +import ( + "sync" + + "github.com/golang/glog" +) + +type workQueue struct { + ch chan workUnit + wg *sync.WaitGroup +} + +func newWorkQueue(workers int, stopCh <-chan struct{}) *workQueue { + q := &workQueue{ + ch: make(chan workUnit, 100), + wg: &sync.WaitGroup{}, + } + go q.run(workers, stopCh) + return q +} + +func (q *workQueue) run(workers int, stopCh <-chan struct{}) { + for i := 0; i < workers; i++ { + go func(i int) { + defer glog.V(4).Infof("worker %d stopping", i) + for { + select { + case work, ok := <-q.ch: + if !ok { + return + } + work.fn() + work.wg.Done() + case <-stopCh: + return + } + } + }(i) + } + <-stopCh +} + +func (q *workQueue) Batch(fn func(Work)) { + w := &worker{ + wg: &sync.WaitGroup{}, + ch: q.ch, + } + fn(w) + w.wg.Wait() +} + +func (q *workQueue) Try(fn func(Try)) error { + w := &worker{ + wg: &sync.WaitGroup{}, + ch: q.ch, + err: make(chan error), + } + fn(w) + return w.FirstError() +} + +func (q *workQueue) Queue(fn func(Work)) { + w := &worker{ + wg: q.wg, + ch: q.ch, + } + fn(w) +} + +func (q *workQueue) Done() { + q.wg.Wait() +} + +type workUnit struct { + fn func() + wg *sync.WaitGroup +} + +type Work interface { + Parallel(fn func()) +} + +type Try interface { + Try(fn func() error) +} + +type worker struct { + wg *sync.WaitGroup + ch chan workUnit + err chan error +} + +func (w *worker) FirstError() error { + done := make(chan struct{}) + go func() { + w.wg.Wait() + close(done) + }() + for { + select { + case err := <-w.err: + 
if err != nil { + return err + } + case <-done: + return nil + } + } +} + +func (w *worker) Parallel(fn func()) { + w.wg.Add(1) + w.ch <- workUnit{wg: w.wg, fn: fn} +} + +func (w *worker) Try(fn func() error) { + w.wg.Add(1) + w.ch <- workUnit{ + wg: w.wg, + fn: func() { + err := fn() + if w.err == nil { + // TODO: have the work queue accumulate errors and release them with Done() + glog.Errorf("Worker error: %v", err) + return + } + w.err <- err + }, + } +} From 80adc18dc3c332584f126e319c22269c0af378d9 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sat, 7 Jul 2018 22:26:06 -0400 Subject: [PATCH 5/7] registry login should not encode service account name system:serviceaccount:blah:blah is incorrectly encoded into base64 for authorization, causing the successive login to fail. Instead, encode it as system-serviceaccount-namespace-name which is ignored by the registry. Also add an insecure flag to skip-check that will bypass validating TLS certs during the skip-check. --- contrib/completions/bash/oc | 2 ++ contrib/completions/zsh/oc | 2 ++ pkg/oc/cli/cmd/registry/login/login.go | 12 +++++++++--- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/contrib/completions/bash/oc b/contrib/completions/bash/oc index 0dcc50b0dc33..0542cb2de734 100644 --- a/contrib/completions/bash/oc +++ b/contrib/completions/bash/oc @@ -13669,6 +13669,8 @@ _oc_registry_login() flags_with_completion=() flags_completion=() + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") flags+=("--registry=") local_nonpersistent_flags+=("--registry=") flags+=("--service-account=") diff --git a/contrib/completions/zsh/oc b/contrib/completions/zsh/oc index 40a60c590330..81136d76ad80 100644 --- a/contrib/completions/zsh/oc +++ b/contrib/completions/zsh/oc @@ -13811,6 +13811,8 @@ _oc_registry_login() flags_with_completion=() flags_completion=() + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") flags+=("--registry=") local_nonpersistent_flags+=("--registry=") 
flags+=("--service-account=") diff --git a/pkg/oc/cli/cmd/registry/login/login.go b/pkg/oc/cli/cmd/registry/login/login.go index afe1aa31edcf..9caafef78d86 100644 --- a/pkg/oc/cli/cmd/registry/login/login.go +++ b/pkg/oc/cli/cmd/registry/login/login.go @@ -19,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apirequest "k8s.io/apiserver/pkg/endpoints/request" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/util/homedir" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -82,6 +83,7 @@ type LoginOptions struct { Credentials Credentials HostPort string SkipCheck bool + Insecure bool CreateDirectory bool Out io.Writer @@ -114,6 +116,7 @@ func New(name string, f kcmdutil.Factory, out, errOut io.Writer) *cobra.Command flag.StringVarP(&o.ServiceAccount, "service-account", "z", o.ServiceAccount, "Log in as the specified service account name in the specified namespace.") flag.StringVar(&o.HostPort, "registry", o.HostPort, "An alternate domain name and port to use for the registry, defaults to the cluster's configured external hostname.") flag.BoolVar(&o.SkipCheck, "skip-check", o.SkipCheck, "Skip checking the credentials against the registry.") + flag.BoolVar(&o.Insecure, "insecure", o.Insecure, "Bypass HTTPS certificate verification when checking the registry login.") return cmd } @@ -155,7 +158,7 @@ func (o *LoginOptions) Complete(f kcmdutil.Factory, args []string) error { if len(token) == 0 { continue } - o.Credentials = newCredentials(fmt.Sprintf("system:serviceaccount:%s:%s", ns, o.ServiceAccount), string(token)) + o.Credentials = newCredentials(fmt.Sprintf("system-serviceaccount-%s-%s", ns, o.ServiceAccount), string(token)) break } if o.Credentials.Empty() { @@ -239,9 +242,12 @@ func (o *LoginOptions) Run() error { creds := registryclient.NewBasicCredentials() url := &url.URL{Host: o.HostPort} creds.Add(url, o.Credentials.Username, o.Credentials.Password) - c := 
registryclient.NewContext(http.DefaultTransport, http.DefaultTransport).WithCredentials(creds) - _, err := c.Repository(ctx, url, "does_not_exist", false) + insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}}) if err != nil { + return err + } + c := registryclient.NewContext(http.DefaultTransport, insecureRT).WithCredentials(creds) + if _, err := c.Repository(ctx, url, "does_not_exist", o.Insecure); err != nil { return fmt.Errorf("unable to check your credentials - pass --skip-check to bypass this error: %v", err) } } From 1346deba40cdd8cfd2799b720f0ae76c960247d0 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sat, 7 Jul 2018 22:36:28 -0400 Subject: [PATCH 6/7] Update import-restrictions.json with new packages These are moves of existing code in the same set, or new, simple dependency packages that bring in no major subtrees. --- hack/import-restrictions.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hack/import-restrictions.json b/hack/import-restrictions.json index ddec39aae35e..ab5305695e9a 100644 --- a/hack/import-restrictions.json +++ b/hack/import-restrictions.json @@ -166,6 +166,7 @@ "vendor/k8s.io/apimachinery", "github.com/openshift/origin/pkg/image/apis/image/internal", "github.com/openshift/origin/pkg/image/apis/image/reference", + "github.com/openshift/origin/pkg/image/apis/image/docker10", "vendor/k8s.io/api", "vendor/github.com/openshift/api", "github.com/openshift/origin/pkg/api/apihelpers" @@ -491,6 +492,10 @@ "github.com/openshift/origin/pkg/image/apis/image", "github.com/openshift/origin/pkg/image/apis/image/install", "github.com/openshift/origin/pkg/image/apis/image/v1/trigger", + "github.com/openshift/origin/pkg/image/apis/image/docker10", + "github.com/openshift/origin/pkg/image/apis/image/reference", + "github.com/openshift/origin/pkg/image/dockerlayer", + "github.com/openshift/origin/pkg/image/dockerlayer/add", 
"github.com/openshift/origin/pkg/image/importer/dockerv1client", "github.com/openshift/origin/pkg/image/registryclient", "github.com/openshift/origin/pkg/image/registryclient/dockercredentials", From 2721b3b4f813037253a7317b8bed2047f888c7cf Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sun, 17 Jun 2018 23:58:02 -0400 Subject: [PATCH 7/7] Add `oc image append` which adds layers to a schema1/2 image This command can take zero or more gzipped layer tars (in Docker layer format) and append them to an existing image or a scratch image and then push the new image to a registry. Layers in the existing image are pushed as well. The caller can mutate the provided config as it goes. --- contrib/completions/bash/oc | 64 ++ contrib/completions/zsh/oc | 64 ++ docs/man/man1/.files_generated_oc | 1 + docs/man/man1/oc-image-append.1 | 3 + pkg/image/apis/image/docker10/conversion.go | 24 + pkg/image/dockerlayer/add/add.go | 140 +++++ pkg/oc/cli/cmd/image/append/append.go | 656 ++++++++++++++++++++ pkg/oc/cli/cmd/image/append/manifest.go | 178 ++++++ pkg/oc/cli/cmd/image/append/workqueue.go | 131 ++++ pkg/oc/cli/cmd/image/image.go | 2 + pkg/oc/cli/cmd/image/mirror/manifest.go | 4 +- pkg/oc/cli/cmd/image/mirror/mappings.go | 18 +- pkg/oc/cli/cmd/image/mirror/mirror.go | 4 +- pkg/oc/cli/cmd/image/mirror/plan.go | 17 +- test/extended/images/append.go | 141 +++++ 15 files changed, 1426 insertions(+), 21 deletions(-) create mode 100644 docs/man/man1/oc-image-append.1 create mode 100644 pkg/image/apis/image/docker10/conversion.go create mode 100644 pkg/image/dockerlayer/add/add.go create mode 100644 pkg/oc/cli/cmd/image/append/append.go create mode 100644 pkg/oc/cli/cmd/image/append/manifest.go create mode 100644 pkg/oc/cli/cmd/image/append/workqueue.go create mode 100644 test/extended/images/append.go diff --git a/contrib/completions/bash/oc b/contrib/completions/bash/oc index 0542cb2de734..a72fa6332f3c 100644 --- a/contrib/completions/bash/oc +++ b/contrib/completions/bash/oc @@ 
-11695,6 +11695,69 @@ _oc_idle() noun_aliases=() } +_oc_image_append() +{ + last_command="oc_image_append" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--created-at=") + local_nonpersistent_flags+=("--created-at=") + flags+=("--drop-history") + local_nonpersistent_flags+=("--drop-history") + flags+=("--dry-run") + local_nonpersistent_flags+=("--dry-run") + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--force") + local_nonpersistent_flags+=("--force") + flags+=("--from=") + local_nonpersistent_flags+=("--from=") + flags+=("--image=") + local_nonpersistent_flags+=("--image=") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--max-per-registry=") + local_nonpersistent_flags+=("--max-per-registry=") + flags+=("--meta=") + local_nonpersistent_flags+=("--meta=") + flags+=("--to=") + local_nonpersistent_flags+=("--to=") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--cache-dir=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--config=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _oc_image_mirror() { last_command="oc_image_mirror" @@ -11761,6 +11824,7 @@ _oc_image() { last_command="oc_image" commands=() + commands+=("append") commands+=("mirror") flags=() diff --git a/contrib/completions/zsh/oc 
b/contrib/completions/zsh/oc index 81136d76ad80..29def1aeaad0 100644 --- a/contrib/completions/zsh/oc +++ b/contrib/completions/zsh/oc @@ -11837,6 +11837,69 @@ _oc_idle() noun_aliases=() } +_oc_image_append() +{ + last_command="oc_image_append" + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--created-at=") + local_nonpersistent_flags+=("--created-at=") + flags+=("--drop-history") + local_nonpersistent_flags+=("--drop-history") + flags+=("--dry-run") + local_nonpersistent_flags+=("--dry-run") + flags+=("--filter-by-os=") + local_nonpersistent_flags+=("--filter-by-os=") + flags+=("--force") + local_nonpersistent_flags+=("--force") + flags+=("--from=") + local_nonpersistent_flags+=("--from=") + flags+=("--image=") + local_nonpersistent_flags+=("--image=") + flags+=("--insecure") + local_nonpersistent_flags+=("--insecure") + flags+=("--max-per-registry=") + local_nonpersistent_flags+=("--max-per-registry=") + flags+=("--meta=") + local_nonpersistent_flags+=("--meta=") + flags+=("--to=") + local_nonpersistent_flags+=("--to=") + flags+=("--as=") + flags+=("--as-group=") + flags+=("--cache-dir=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--config=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--loglevel=") + flags+=("--logspec=") + flags+=("--match-server-version") + flags+=("--namespace=") + flags_with_completion+=("--namespace") + flags_completion+=("__oc_get_namespaces") + two_word_flags+=("-n") + flags_with_completion+=("-n") + flags_completion+=("__oc_get_namespaces") + flags+=("--request-timeout=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--token=") + flags+=("--user=") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + _oc_image_mirror() { last_command="oc_image_mirror" @@ -11903,6 +11966,7 @@ _oc_image() { 
last_command="oc_image" commands=() + commands+=("append") commands+=("mirror") flags=() diff --git a/docs/man/man1/.files_generated_oc b/docs/man/man1/.files_generated_oc index 278f10abe0da..8a1e4c108749 100644 --- a/docs/man/man1/.files_generated_oc +++ b/docs/man/man1/.files_generated_oc @@ -228,6 +228,7 @@ oc-expose.1 oc-extract.1 oc-get.1 oc-idle.1 +oc-image-append.1 oc-image-mirror.1 oc-image.1 oc-import-app.json.1 diff --git a/docs/man/man1/oc-image-append.1 b/docs/man/man1/oc-image-append.1 new file mode 100644 index 000000000000..b6fd7a0f9896 --- /dev/null +++ b/docs/man/man1/oc-image-append.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/pkg/image/apis/image/docker10/conversion.go b/pkg/image/apis/image/docker10/conversion.go new file mode 100644 index 000000000000..e3a278925c74 --- /dev/null +++ b/pkg/image/apis/image/docker10/conversion.go @@ -0,0 +1,24 @@ +package docker10 + +// Convert_DockerV1CompatibilityImage_to_DockerImageConfig takes the v1Compatibility image +// payload of a Docker schema 1 manifest and converts it to a DockerImageConfig.
+func Convert_DockerV1CompatibilityImage_to_DockerImageConfig(in *DockerV1CompatibilityImage, out *DockerImageConfig) error { + *out = DockerImageConfig{ + ID: in.ID, + Parent: in.Parent, + Comment: in.Comment, + Created: in.Created, + Container: in.Container, + DockerVersion: in.DockerVersion, + Author: in.Author, + Architecture: in.Architecture, + Size: in.Size, + OS: "linux", + ContainerConfig: in.ContainerConfig, + } + if in.Config != nil { + out.Config = &DockerConfig{} + *out.Config = *in.Config + } + return nil +} diff --git a/pkg/image/dockerlayer/add/add.go b/pkg/image/dockerlayer/add/add.go new file mode 100644 index 000000000000..75531739bfd8 --- /dev/null +++ b/pkg/image/dockerlayer/add/add.go @@ -0,0 +1,140 @@ +package add + +import ( + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "runtime" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + digest "github.com/opencontainers/go-digest" + + "github.com/openshift/origin/pkg/image/apis/image/docker10" + "github.com/openshift/origin/pkg/image/dockerlayer" +) + +// get base manifest +// check that I can access base layers +// find the input file (assume I can stream) +// start a streaming upload of the layer to the remote registry, while calculating digests +// get back the final digest +// build the new image manifest and config.json +// upload config.json +// upload the rest of the layers +// tag the image + +const ( + // dockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. + dockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + // dockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. + dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" +) + +// DigestCopy reads all of src into dst, where src is a gzipped stream. 
It will return the +// sha256 sum of the underlying content (the layerDigest) and the sha256 sum of the +// tar archive (the blobDigest) or an error. If the gzip layer has a modification time +// it will be returned. +// TODO: use configurable digests +func DigestCopy(dst io.ReaderFrom, src io.Reader) (layerDigest, blobDigest digest.Digest, modTime *time.Time, size int64, err error) { + algo := digest.Canonical + // calculate the blob digest as the sha256 sum of the uploaded contents + blobhash := algo.Hash() + // calculate the diffID as the sha256 sum of the layer contents + pr, pw := io.Pipe() + layerhash := algo.Hash() + ch := make(chan error) + go func() { + defer close(ch) + gr, err := gzip.NewReader(pr) + if err != nil { + ch <- fmt.Errorf("unable to create gzip reader layer upload: %v", err) + return + } + if !gr.Header.ModTime.IsZero() { + modTime = &gr.Header.ModTime + } + _, err = io.Copy(layerhash, gr) + ch <- err + }() + + n, err := dst.ReadFrom(io.TeeReader(src, io.MultiWriter(blobhash, pw))) + if err != nil { + return "", "", nil, 0, fmt.Errorf("unable to upload new layer (%d): %v", n, err) + } + if err := pw.Close(); err != nil { + return "", "", nil, 0, fmt.Errorf("unable to complete writing diffID: %v", err) + } + if err := <-ch; err != nil { + return "", "", nil, 0, fmt.Errorf("unable to calculate layer diffID: %v", err) + } + + layerDigest = digest.NewDigestFromBytes(algo, layerhash.Sum(make([]byte, 0, layerhash.Size()))) + blobDigest = digest.NewDigestFromBytes(algo, blobhash.Sum(make([]byte, 0, blobhash.Size()))) + return layerDigest, blobDigest, modTime, n, nil +} + +func NewEmptyConfig() *docker10.DockerImageConfig { + config := &docker10.DockerImageConfig{ + DockerVersion: "", + // Created must be non-zero + Created: (time.Time{}).Add(1 * time.Second), + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + } + return config +} + +func AddScratchLayerToConfig(config *docker10.DockerImageConfig) distribution.Descriptor { + layer := 
distribution.Descriptor{ + MediaType: dockerV2Schema2LayerMediaType, + Digest: digest.Digest(dockerlayer.GzippedEmptyLayerDigest), + Size: int64(len(dockerlayer.GzippedEmptyLayer)), + } + AddLayerToConfig(config, layer, dockerlayer.EmptyLayerDiffID) + return layer +} + +func AddLayerToConfig(config *docker10.DockerImageConfig, layer distribution.Descriptor, diffID string) { + if config.RootFS == nil { + config.RootFS = &docker10.DockerConfigRootFS{Type: "layers"} + } + config.RootFS.DiffIDs = append(config.RootFS.DiffIDs, diffID) + config.Size += layer.Size +} + +func UploadSchema2Config(ctx context.Context, blobs distribution.BlobService, config *docker10.DockerImageConfig, layers []distribution.Descriptor) (*schema2.DeserializedManifest, error) { + // ensure the image size is correct before persisting + config.Size = 0 + for _, layer := range layers { + config.Size += layer.Size + } + configJSON, err := json.Marshal(config) + if err != nil { + return nil, err + } + return putSchema2ImageConfig(ctx, blobs, dockerV2Schema2ConfigMediaType, configJSON, layers) +} + +// putSchema2ImageConfig uploads the provided configJSON to the blob store and returns the generated manifest +// for the requested image. 
+func putSchema2ImageConfig(ctx context.Context, blobs distribution.BlobService, mediaType string, configJSON []byte, layers []distribution.Descriptor) (*schema2.DeserializedManifest, error) { + b := schema2.NewManifestBuilder(blobs, mediaType, configJSON) + for _, layer := range layers { + if err := b.AppendReference(layer); err != nil { + return nil, err + } + } + m, err := b.Build(ctx) + if err != nil { + return nil, err + } + manifest, ok := m.(*schema2.DeserializedManifest) + if !ok { + return nil, fmt.Errorf("unable to turn %T into a DeserializedManifest, unable to store image", m) + } + return manifest, nil +} diff --git a/pkg/oc/cli/cmd/image/append/append.go b/pkg/oc/cli/cmd/image/append/append.go new file mode 100644 index 000000000000..2b942a89b305 --- /dev/null +++ b/pkg/oc/cli/cmd/image/append/append.go @@ -0,0 +1,656 @@ +package append + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "runtime" + "strconv" + "time" + + units "github.com/docker/go-units" + "github.com/golang/glog" + "github.com/spf13/cobra" + + "github.com/docker/distribution" + distributioncontext "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + digest "github.com/opencontainers/go-digest" + + "k8s.io/client-go/rest" + "k8s.io/kubernetes/pkg/kubectl/cmd/templates" + kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/openshift/origin/pkg/image/apis/image/docker10" + imagereference "github.com/openshift/origin/pkg/image/apis/image/reference" + "github.com/openshift/origin/pkg/image/dockerlayer" + "github.com/openshift/origin/pkg/image/dockerlayer/add" + "github.com/openshift/origin/pkg/image/registryclient" + 
"github.com/openshift/origin/pkg/image/registryclient/dockercredentials" +) + +var ( + desc = templates.LongDesc(` + Add layers to Docker images + + Modifies an existing image by adding layers or changing configuration and then pushes that + image to a remote registry. Any inherited layers are streamed from registry to registry + without being stored locally. The default docker credentials are used for authenticating + to the registries. + + Layers may be provided as arguments to the command and must each be a gzipped tar archive + representing a filesystem overlay to the inherited images. The archive may contain a "whiteout" + file (the prefix '.wh.' and the filename) which will hide files in the lower layers. All + supported filesystem attributes present in the archive will be used as is. + + Metadata about the image (the configuration passed to the container runtime) may be altered + by passing a JSON string to the --image or --meta options. The --image flag changes what + the container runtime sees, while the --meta option allows you to change the attributes of + the image used by the runtime. Use --dry-run to see the result of your changes. You may + add the --drop-history flag to remove information from the image about the system that + built the base image. + + Images in manifest list format will automatically select an image that matches the current + operating system and architecture unless you use --filter-by-os to select a different image. + This flag has no effect on regular images. 
+ + Experimental: This command is under active development and may change without notice.`) + + example = templates.Examples(` +# Remove the entrypoint on the mysql:latest image +%[1]s --from mysql:latest --to myregistry.com/myimage:latest --image {"Entrypoint":null} + +# Add a new layer to the image +%[1]s --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz +`) +) + +type options struct { + Out, ErrOut io.Writer + + From, To string + LayerFiles []string + + ConfigPatch string + MetaPatch string + + DropHistory bool + CreatedAt string + + OSFilter *regexp.Regexp + DefaultOSFilter bool + + FilterByOS string + + MaxPerRegistry int + + DryRun bool + Insecure bool + Force bool +} + +// schema2ManifestOnly specifically requests a manifest list first +var schema2ManifestOnly = distribution.WithManifestMediaTypes([]string{ + manifestlist.MediaTypeManifestList, + schema2.MediaTypeManifest, +}) + +// New creates a new command +func New(name string, out, errOut io.Writer) *cobra.Command { + o := &options{ + MaxPerRegistry: 3, + } + + cmd := &cobra.Command{ + Use: "append", + Short: "Add layers to images and push them to a registry", + Long: desc, + Example: fmt.Sprintf(example, name), + Run: func(c *cobra.Command, args []string) { + o.Out = out + o.ErrOut = errOut + kcmdutil.CheckErr(o.Complete(c, args)) + kcmdutil.CheckErr(o.Run()) + }, + } + + flag := cmd.Flags() + flag.BoolVar(&o.DryRun, "dry-run", o.DryRun, "Print the actions that would be taken and exit without writing to the destination.") + flag.BoolVar(&o.Insecure, "insecure", o.Insecure, "Allow push and pull operations to registries to be made over HTTP") + flag.StringVar(&o.FilterByOS, "filter-by-os", o.FilterByOS, "A regular expression to control which images are mirrored. Images will be passed as '/[/]'.") + + flag.StringVar(&o.From, "from", o.From, "The image to use as a base. 
If empty, a new scratch image is created.") + flag.StringVar(&o.To, "to", o.To, "The Docker repository tag to upload the appended image to.") + + flag.StringVar(&o.ConfigPatch, "image", o.ConfigPatch, "A JSON patch that will be used with the output image data.") + flag.StringVar(&o.MetaPatch, "meta", o.MetaPatch, "A JSON patch that will be used with image base metadata (advanced config).") + flag.BoolVar(&o.DropHistory, "drop-history", o.DropHistory, "Fields on the image that relate to the history of how the image was created will be removed.") + flag.StringVar(&o.CreatedAt, "created-at", o.CreatedAt, "The creation date for this image, in RFC3339 format or milliseconds from the Unix epoch.") + + flag.BoolVar(&o.Force, "force", o.Force, "If set, the command will attempt to upload all layers instead of skipping those that are already uploaded.") + flag.IntVar(&o.MaxPerRegistry, "max-per-registry", o.MaxPerRegistry, "Number of concurrent requests allowed per registry.") + + return cmd +} + +func (o *options) Complete(cmd *cobra.Command, args []string) error { + pattern := o.FilterByOS + if len(pattern) == 0 && !cmd.Flags().Changed("filter-by-os") { + o.DefaultOSFilter = true + pattern = regexp.QuoteMeta(fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)) + } + if len(pattern) > 0 { + re, err := regexp.Compile(pattern) + if err != nil { + return fmt.Errorf("--filter-by-os was not a valid regular expression: %v", err) + } + o.OSFilter = re + } + + for _, arg := range args { + fi, err := os.Stat(arg) + if err != nil { + return fmt.Errorf("invalid argument: %s", err) + } + if fi.IsDir() { + return fmt.Errorf("invalid argument: %s is a directory", arg) + } + } + o.LayerFiles = args + + return nil +} + +// includeDescriptor returns true if the provided manifest should be included. 
+func (o *options) includeDescriptor(d *manifestlist.ManifestDescriptor, hasMultiple bool) bool { + if o.OSFilter == nil { + return true + } + if o.DefaultOSFilter && !hasMultiple { + return true + } + if len(d.Platform.Variant) > 0 { + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s/%s", d.Platform.OS, d.Platform.Architecture, d.Platform.Variant)) + } + return o.OSFilter.MatchString(fmt.Sprintf("%s/%s", d.Platform.OS, d.Platform.Architecture)) +} + +func (o *options) Run() error { + var createdAt *time.Time + if len(o.CreatedAt) > 0 { + if d, err := strconv.ParseInt(o.CreatedAt, 10, 64); err == nil { + t := time.Unix(d/1000, (d%1000)*1000000).UTC() + createdAt = &t + } else { + t, err := time.Parse(time.RFC3339, o.CreatedAt) + if err != nil { + return fmt.Errorf("--created-at must be a relative time (2m, -5h) or an RFC3339 formatted date") + } + createdAt = &t + } + } + + var from *imagereference.DockerImageReference + if len(o.From) > 0 { + src, err := imagereference.Parse(o.From) + if err != nil { + return err + } + if len(src.Tag) == 0 && len(src.ID) == 0 { + return fmt.Errorf("--from must point to an image ID or image tag") + } + from = &src + } + to, err := imagereference.Parse(o.To) + if err != nil { + return err + } + if len(to.ID) > 0 { + return fmt.Errorf("--to may not point to an image by ID") + } + + rt, err := rest.TransportFor(&rest.Config{}) + if err != nil { + return err + } + insecureRT, err := rest.TransportFor(&rest.Config{TLSClientConfig: rest.TLSClientConfig{Insecure: true}}) + if err != nil { + return err + } + creds := dockercredentials.NewLocal() + ctx := context.Background() + fromContext := registryclient.NewContext(rt, insecureRT).WithCredentials(creds) + toContext := registryclient.NewContext(rt, insecureRT).WithActions("push").WithCredentials(creds) + + toRepo, err := toContext.Repository(ctx, to.DockerClientDefaults().RegistryURL(), to.RepositoryName(), o.Insecure) + if err != nil { + return err + } + toManifests, err := 
toRepo.Manifests(ctx) + if err != nil { + return err + } + + var ( + base *docker10.DockerImageConfig + layers []distribution.Descriptor + fromRepo distribution.Repository + ) + if from != nil { + repo, err := fromContext.Repository(ctx, from.DockerClientDefaults().RegistryURL(), from.RepositoryName(), o.Insecure) + if err != nil { + return err + } + fromRepo = repo + var srcDigest digest.Digest + if len(from.Tag) > 0 { + desc, err := repo.Tags(ctx).Get(ctx, from.Tag) + if err != nil { + return err + } + srcDigest = desc.Digest + } else { + srcDigest = digest.Digest(from.ID) + } + manifests, err := repo.Manifests(ctx) + if err != nil { + return err + } + srcManifest, err := manifests.Get(ctx, srcDigest, schema2ManifestOnly) + if err != nil { + return err + } + + originalSrcDigest := srcDigest + srcManifests, srcManifest, srcDigest, err := processManifestList(ctx, srcDigest, srcManifest, manifests, *from, o.includeDescriptor) + if err != nil { + return err + } + if len(srcManifests) == 0 { + return fmt.Errorf("filtered all images from %s", from) + } + + var location string + if srcDigest == originalSrcDigest { + location = fmt.Sprintf("manifest %s", srcDigest) + } else { + location = fmt.Sprintf("manifest %s in manifest list %s", srcDigest, originalSrcDigest) + } + + switch t := srcManifest.(type) { + case *schema2.DeserializedManifest: + if t.Config.MediaType != schema2.MediaTypeImageConfig { + return fmt.Errorf("unable to append layers to images with config %s from %s", t.Config.MediaType, location) + } + configJSON, err := repo.Blobs(ctx).Get(ctx, t.Config.Digest) + if err != nil { + return fmt.Errorf("unable to find manifest for image %s: %v", *from, err) + } + glog.V(4).Infof("Raw image config json:\n%s", string(configJSON)) + config := &docker10.DockerImageConfig{} + if err := json.Unmarshal(configJSON, &config); err != nil { + return fmt.Errorf("the source image manifest could not be parsed: %v", err) + } + + base = config + layers = t.Layers + base.Size = 0 
+ for _, layer := range t.Layers { + base.Size += layer.Size + } + + case *schema1.SignedManifest: + if glog.V(4) { + _, configJSON, _ := srcManifest.Payload() + glog.Infof("Raw image config json:\n%s", string(configJSON)) + } + if len(t.History) == 0 { + return fmt.Errorf("input image is in an unknown format: no v1Compatibility history") + } + config := &docker10.DockerV1CompatibilityImage{} + if err := json.Unmarshal([]byte(t.History[0].V1Compatibility), &config); err != nil { + return err + } + + base = &docker10.DockerImageConfig{} + if err := docker10.Convert_DockerV1CompatibilityImage_to_DockerImageConfig(config, base); err != nil { + return err + } + + // schema1 layers are in reverse order + layers = make([]distribution.Descriptor, 0, len(t.FSLayers)) + for i := len(t.FSLayers) - 1; i >= 0; i-- { + layer := distribution.Descriptor{ + MediaType: schema2.MediaTypeLayer, + Digest: t.FSLayers[i].BlobSum, + // size must be reconstructed from the blobs + } + // we must reconstruct the tar sum from the blobs + add.AddLayerToConfig(base, layer, "") + layers = append(layers, layer) + } + + default: + return fmt.Errorf("unable to append layers to images of type %T from %s", srcManifest, location) + } + } else { + base = add.NewEmptyConfig() + layers = []distribution.Descriptor{add.AddScratchLayerToConfig(base)} + fromRepo = scratchRepo{} + } + + if base.Config == nil { + base.Config = &docker10.DockerConfig{} + } + + if glog.V(4) { + configJSON, _ := json.MarshalIndent(base, "", " ") + glog.Infof("input config:\n%s\nlayers: %#v", configJSON, layers) + } + + if createdAt == nil { + t := time.Now() + createdAt = &t + } + base.Created = *createdAt + if o.DropHistory { + base.ContainerConfig = docker10.DockerConfig{} + base.History = nil + base.Container = "" + base.DockerVersion = "" + base.Config.Image = "" + } + + if len(o.ConfigPatch) > 0 { + if err := json.Unmarshal([]byte(o.ConfigPatch), base.Config); err != nil { + return fmt.Errorf("unable to patch image from 
--image: %v", err) + } + } + if len(o.MetaPatch) > 0 { + if err := json.Unmarshal([]byte(o.MetaPatch), base); err != nil { + return fmt.Errorf("unable to patch image from --meta: %v", err) + } + } + + numLayers := len(layers) + toBlobs := toRepo.Blobs(ctx) + + for _, arg := range o.LayerFiles { + err := func() error { + f, err := os.Open(arg) + if err != nil { + return err + } + defer f.Close() + var readerFrom io.ReaderFrom = ioutil.Discard.(io.ReaderFrom) + var done = func(distribution.Descriptor) error { return nil } + if !o.DryRun { + fmt.Fprint(o.Out, "Uploading ... ") + start := time.Now() + bw, err := toBlobs.Create(ctx) + if err != nil { + fmt.Fprintln(o.Out, "failed") + return err + } + readerFrom = bw + defer bw.Close() + done = func(desc distribution.Descriptor) error { + _, err := bw.Commit(ctx, desc) + if err != nil { + fmt.Fprintln(o.Out, "failed") + return err + } + fmt.Fprintf(o.Out, "%s/s\n", units.HumanSize(float64(desc.Size)/float64(time.Now().Sub(start))*float64(time.Second))) + return nil + } + } + layerDigest, blobDigest, modTime, n, err := add.DigestCopy(readerFrom, f) + desc := distribution.Descriptor{ + Digest: blobDigest, + Size: n, + MediaType: schema2.MediaTypeLayer, + } + layers = append(layers, desc) + add.AddLayerToConfig(base, desc, layerDigest.String()) + if modTime != nil && !modTime.IsZero() { + base.Created = *modTime + } + return done(desc) + }() + if err != nil { + return err + } + } + + if o.DryRun { + configJSON, _ := json.MarshalIndent(base, "", " ") + fmt.Fprintf(o.Out, "%s", configJSON) + return nil + } + + // upload base layers in parallel + stopCh := make(chan struct{}) + defer close(stopCh) + q := newWorkQueue(o.MaxPerRegistry, stopCh) + err = q.Try(func(w Try) { + for i := range layers[:numLayers] { + layer := &layers[i] + index := i + missingDiffID := len(base.RootFS.DiffIDs[i]) == 0 + w.Try(func() error { + fromBlobs := fromRepo.Blobs(ctx) + + // check whether the blob exists + if !o.Force { + if desc, err := 
toBlobs.Stat(ctx, layer.Digest); err == nil { + // ensure the correct size makes it back to the manifest + glog.V(4).Infof("Layer %s already exists in destination (%s)", layer.Digest, units.HumanSizeWithPrecision(float64(layer.Size), 3)) + if layer.Size == 0 { + layer.Size = desc.Size + } + // we need to calculate the tar sum from the image, requiring us to pull it + if missingDiffID { + glog.V(4).Infof("Need tar sum, streaming layer %s", layer.Digest) + r, err := fromBlobs.Open(ctx, layer.Digest) + if err != nil { + return fmt.Errorf("unable to access the layer %s in order to calculate its content ID: %v", layer.Digest, err) + } + defer r.Close() + layerDigest, _, _, _, err := add.DigestCopy(ioutil.Discard.(io.ReaderFrom), r) + if err != nil { + return fmt.Errorf("unable to calculate contentID for layer %s: %v", layer.Digest, err) + } + glog.V(4).Infof("Layer %s has tar sum %s", layer.Digest, layerDigest) + base.RootFS.DiffIDs[index] = layerDigest.String() + } + // TODO: due to a bug in the registry, the empty layer is always returned as existing, but + // an upload without it will fail - https://bugzilla.redhat.com/show_bug.cgi?id=1599028 + if layer.Digest != dockerlayer.GzippedEmptyLayerDigest { + return nil + } + } + } + + // source + r, err := fromBlobs.Open(ctx, layer.Digest) + if err != nil { + return fmt.Errorf("unable to access the source layer %s: %v", layer.Digest, err) + } + defer r.Close() + + // destination + mountOptions := []distribution.BlobCreateOption{WithDescriptor(*layer)} + if from != nil && from.Registry == to.Registry { + source, err := reference.WithDigest(fromRepo.Named(), layer.Digest) + if err != nil { + return err + } + mountOptions = append(mountOptions, client.WithMountFrom(source)) + } + bw, err := toBlobs.Create(ctx, mountOptions...) 
+ if err != nil { + return fmt.Errorf("unable to upload layer %s to destination repository: %v", layer.Digest, err) + } + defer bw.Close() + + // copy the blob, calculating the diffID if necessary + if layer.Size > 0 { + fmt.Fprintf(o.Out, "Uploading %s ...\n", units.HumanSize(float64(layer.Size))) + } else { + fmt.Fprintf(o.Out, "Uploading ...\n") + } + if missingDiffID { + glog.V(4).Infof("Need tar sum, calculating while streaming %s", layer.Digest) + layerDigest, _, _, _, err := add.DigestCopy(bw, r) + if err != nil { + return err + } + glog.V(4).Infof("Layer %s has tar sum %s", layer.Digest, layerDigest) + base.RootFS.DiffIDs[index] = layerDigest.String() + } else { + if _, err := bw.ReadFrom(r); err != nil { + return fmt.Errorf("unable to copy the source layer %s to the destination image: %v", layer.Digest, err) + } + } + desc, err := bw.Commit(ctx, *layer) + if err != nil { + return fmt.Errorf("uploading the source layer %s failed: %v", layer.Digest, err) + } + + // check output + if desc.Digest != layer.Digest { + return fmt.Errorf("when uploading blob %s, got a different returned digest %s", desc.Digest, layer.Digest) + } + // ensure the correct size makes it back to the manifest + if layer.Size == 0 { + layer.Size = desc.Size + } + return nil + }) + } + }) + if err != nil { + return err + } + + manifest, err := add.UploadSchema2Config(ctx, toBlobs, base, layers) + if err != nil { + return fmt.Errorf("unable to upload the new image manifest: %v", err) + } + toDigest, err := putManifestInCompatibleSchema(ctx, manifest, to.Tag, toManifests, fromRepo.Blobs(ctx), toRepo.Named()) + if err != nil { + return fmt.Errorf("unable to convert the image to a compatible schema version: %v", err) + } + fmt.Fprintf(o.Out, "Pushed image %s to %s\n", toDigest, to) + return nil +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithDescriptor returns a BlobCreateOption which provides the expected blob 
metadata. +func WithDescriptor(desc distribution.Descriptor) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + if opts.Mount.Stat == nil { + opts.Mount.Stat = &desc + } + return nil + }) +} + +func calculateLayerDigest(blobs distribution.BlobService, dgst digest.Digest, readerFrom io.ReaderFrom, r io.Reader) (digest.Digest, error) { + if readerFrom == nil { + readerFrom = ioutil.Discard.(io.ReaderFrom) + } + layerDigest, _, _, _, err := add.DigestCopy(readerFrom, r) + return layerDigest, err +} + +// scratchRepo can serve the scratch image blob. +type scratchRepo struct{} + +var _ distribution.Repository = scratchRepo{} + +func (_ scratchRepo) Named() reference.Named { panic("not implemented") } +func (_ scratchRepo) Tags(ctx distributioncontext.Context) distribution.TagService { + panic("not implemented") +} +func (_ scratchRepo) Manifests(ctx distributioncontext.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + panic("not implemented") +} + +func (r scratchRepo) Blobs(ctx distributioncontext.Context) distribution.BlobStore { return r } + +func (_ scratchRepo) Stat(ctx distributioncontext.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if dgst != dockerlayer.GzippedEmptyLayerDigest { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Digest: digest.Digest(dockerlayer.GzippedEmptyLayerDigest), + Size: int64(len(dockerlayer.GzippedEmptyLayer)), + }, nil +} + +func (_ scratchRepo) Get(ctx distributioncontext.Context, dgst digest.Digest) ([]byte, error) { + if dgst != dockerlayer.GzippedEmptyLayerDigest { + return nil, distribution.ErrBlobUnknown + } + return dockerlayer.GzippedEmptyLayer, nil +} + +type nopCloseBuffer struct { + 
*bytes.Buffer +} + +func (_ nopCloseBuffer) Seek(offset int64, whence int) (int64, error) { + return 0, nil +} + +func (_ nopCloseBuffer) Close() error { + return nil +} + +func (_ scratchRepo) Open(ctx distributioncontext.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + if dgst != dockerlayer.GzippedEmptyLayerDigest { + return nil, distribution.ErrBlobUnknown + } + return nopCloseBuffer{bytes.NewBuffer(dockerlayer.GzippedEmptyLayer)}, nil +} + +func (_ scratchRepo) Put(ctx distributioncontext.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + panic("not implemented") +} + +func (_ scratchRepo) Create(ctx distributioncontext.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (_ scratchRepo) Resume(ctx distributioncontext.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (_ scratchRepo) ServeBlob(ctx distributioncontext.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (_ scratchRepo) Delete(ctx distributioncontext.Context, dgst digest.Digest) error { + panic("not implemented") +} diff --git a/pkg/oc/cli/cmd/image/append/manifest.go b/pkg/oc/cli/cmd/image/append/manifest.go new file mode 100644 index 000000000000..461f61193f72 --- /dev/null +++ b/pkg/oc/cli/cmd/image/append/manifest.go @@ -0,0 +1,178 @@ +package append + +import ( + "context" + "fmt" + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + + "github.com/docker/libtrust" + "github.com/golang/glog" + digest "github.com/opencontainers/go-digest" + + imagereference 
"github.com/openshift/origin/pkg/image/apis/image/reference" +) + +func processManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imagereference.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor, bool) bool) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) { + var srcManifests []distribution.Manifest + switch t := srcManifest.(type) { + case *manifestlist.DeserializedManifestList: + manifestDigest := srcDigest + manifestList := t + + filtered := make([]manifestlist.ManifestDescriptor, 0, len(t.Manifests)) + for _, manifest := range t.Manifests { + if !filterFn(&manifest, len(t.Manifests) > 1) { + glog.V(5).Infof("Skipping image for %#v from %s", manifest.Platform, ref) + continue + } + glog.V(5).Infof("Including image for %#v from %s", manifest.Platform, ref) + filtered = append(filtered, manifest) + } + + if len(filtered) == 0 { + return nil, nil, "", nil + } + + // if we're filtering the manifest list, update the source manifest and digest + if len(filtered) != len(t.Manifests) { + var err error + t, err = manifestlist.FromDescriptors(filtered) + if err != nil { + return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list: %v", ref, err) + } + _, body, err := t.Payload() + if err != nil { + return nil, nil, "", fmt.Errorf("unable to filter source image %s manifest list (bad payload): %v", ref, err) + } + manifestList = t + manifestDigest = srcDigest.Algorithm().FromBytes(body) + glog.V(5).Infof("Filtered manifest list to new digest %s:\n%s", manifestDigest, body) + } + + for i, manifest := range t.Manifests { + childManifest, err := manifests.Get(ctx, manifest.Digest, distribution.WithManifestMediaTypes([]string{manifestlist.MediaTypeManifestList, schema2.MediaTypeManifest})) + if err != nil { + return nil, nil, "", fmt.Errorf("unable to retrieve source image %s manifest #%d from manifest list: %v", ref, i+1, err) + 
} + srcManifests = append(srcManifests, childManifest) + } + + switch { + case len(srcManifests) == 1: + _, body, err := srcManifests[0].Payload() + if err != nil { + return nil, nil, "", fmt.Errorf("unable to convert source image %s manifest list to single manifest: %v", ref, err) + } + manifestDigest := srcDigest.Algorithm().FromBytes(body) + glog.V(5).Infof("Used only one manifest from the list %s", manifestDigest) + return srcManifests, srcManifests[0], manifestDigest, nil + default: + return append(srcManifests, manifestList), manifestList, manifestDigest, nil + } + + default: + return []distribution.Manifest{srcManifest}, srcManifest, srcDigest, nil + } +} + +// TDOO: remove when quay.io switches to v2 schema +func putManifestInCompatibleSchema( + ctx context.Context, + srcManifest distribution.Manifest, + tag string, + toManifests distribution.ManifestService, + // supports schema2 -> schema1 downconversion + blobs distribution.BlobService, + ref reference.Named, +) (digest.Digest, error) { + var options []distribution.ManifestServiceOption + if len(tag) > 0 { + glog.V(5).Infof("Put manifest %s:%s", ref, tag) + options = []distribution.ManifestServiceOption{distribution.WithTag(tag)} + } else { + glog.V(5).Infof("Put manifest %s", ref) + } + toDigest, err := toManifests.Put(ctx, srcManifest, options...) 
+ if err == nil { + return toDigest, nil + } + errs, ok := err.(errcode.Errors) + if !ok || len(errs) == 0 { + return toDigest, err + } + errcode, ok := errs[0].(errcode.Error) + if !ok || errcode.ErrorCode() != v2.ErrorCodeManifestInvalid { + return toDigest, err + } + // try downconverting to v2-schema1 + schema2Manifest, ok := srcManifest.(*schema2.DeserializedManifest) + if !ok { + return toDigest, err + } + tagRef, tagErr := reference.WithTag(ref, tag) + if tagErr != nil { + return toDigest, err + } + glog.V(5).Infof("Registry reported invalid manifest error, attempting to convert to v2schema1 as ref %s", tagRef) + schema1Manifest, convertErr := convertToSchema1(ctx, blobs, schema2Manifest, tagRef) + if convertErr != nil { + return toDigest, err + } + if glog.V(6) { + _, data, _ := schema1Manifest.Payload() + glog.Infof("Converted to v2schema1\n%s", string(data)) + } + return toManifests.Put(ctx, schema1Manifest, distribution.WithTag(tag)) +} + +// TDOO: remove when quay.io switches to v2 schema +func convertToSchema1(ctx context.Context, blobs distribution.BlobService, schema2Manifest *schema2.DeserializedManifest, ref reference.Named) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + configJSON, err := blobs.Get(ctx, targetDescriptor.Digest) + if err != nil { + return nil, err + } + trustKey, err := loadPrivateKey() + if err != nil { + return nil, err + } + builder := schema1.NewConfigManifestBuilder(blobs, trustKey, ref, configJSON) + for _, d := range schema2Manifest.Layers { + if err := builder.AppendReference(d); err != nil { + return nil, err + } + } + manifest, err := builder.Build(ctx) + if err != nil { + return nil, err + } + return manifest, nil +} + +var ( + privateKeyLock sync.Mutex + privateKey libtrust.PrivateKey +) + +// TDOO: remove when quay.io switches to v2 schema +func loadPrivateKey() (libtrust.PrivateKey, error) { + privateKeyLock.Lock() + defer privateKeyLock.Unlock() + if privateKey != nil { + return 
privateKey, nil + } + trustKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + privateKey = trustKey + return privateKey, nil +} diff --git a/pkg/oc/cli/cmd/image/append/workqueue.go b/pkg/oc/cli/cmd/image/append/workqueue.go new file mode 100644 index 000000000000..fb57f1a746d4 --- /dev/null +++ b/pkg/oc/cli/cmd/image/append/workqueue.go @@ -0,0 +1,131 @@ +package append + +import ( + "sync" + + "github.com/golang/glog" +) + +type workQueue struct { + ch chan workUnit + wg *sync.WaitGroup +} + +func newWorkQueue(workers int, stopCh <-chan struct{}) *workQueue { + q := &workQueue{ + ch: make(chan workUnit, 100), + wg: &sync.WaitGroup{}, + } + go q.run(workers, stopCh) + return q +} + +func (q *workQueue) run(workers int, stopCh <-chan struct{}) { + for i := 0; i < workers; i++ { + go func(i int) { + defer glog.V(4).Infof("worker %d stopping", i) + for { + select { + case work, ok := <-q.ch: + if !ok { + return + } + work.fn() + work.wg.Done() + case <-stopCh: + return + } + } + }(i) + } + <-stopCh +} + +func (q *workQueue) Batch(fn func(Work)) { + w := &worker{ + wg: &sync.WaitGroup{}, + ch: q.ch, + } + fn(w) + w.wg.Wait() +} + +func (q *workQueue) Try(fn func(Try)) error { + w := &worker{ + wg: &sync.WaitGroup{}, + ch: q.ch, + err: make(chan error), + } + fn(w) + return w.FirstError() +} + +func (q *workQueue) Queue(fn func(Work)) { + w := &worker{ + wg: q.wg, + ch: q.ch, + } + fn(w) +} + +func (q *workQueue) Done() { + q.wg.Wait() +} + +type workUnit struct { + fn func() + wg *sync.WaitGroup +} + +type Work interface { + Parallel(fn func()) +} + +type Try interface { + Try(fn func() error) +} + +type worker struct { + wg *sync.WaitGroup + ch chan workUnit + err chan error +} + +func (w *worker) FirstError() error { + done := make(chan struct{}) + go func() { + w.wg.Wait() + close(done) + }() + for { + select { + case err := <-w.err: + if err != nil { + return err + } + case <-done: + return nil + } + } +} + +func (w *worker) 
Parallel(fn func()) { + w.wg.Add(1) + w.ch <- workUnit{wg: w.wg, fn: fn} +} + +func (w *worker) Try(fn func() error) { + w.wg.Add(1) + w.ch <- workUnit{ + wg: w.wg, + fn: func() { + err := fn() + if w.err == nil { + // TODO: have the work queue accumulate errors and release them with Done() + glog.Errorf("Worker error: %v", err) + return + } + w.err <- err + }, + } +} diff --git a/pkg/oc/cli/cmd/image/image.go b/pkg/oc/cli/cmd/image/image.go index f47cfa29eb74..12f69894f3c7 100644 --- a/pkg/oc/cli/cmd/image/image.go +++ b/pkg/oc/cli/cmd/image/image.go @@ -9,6 +9,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "github.com/openshift/origin/pkg/cmd/templates" + "github.com/openshift/origin/pkg/oc/cli/cmd/image/append" "github.com/openshift/origin/pkg/oc/cli/cmd/image/mirror" ) @@ -34,6 +35,7 @@ func NewCmdImage(fullName string, f kcmdutil.Factory, streams genericclioptions. { Message: "Advanced commands:", Commands: []*cobra.Command{ + append.New(name, streams.Out, streams.ErrOut), mirror.NewCmdMirrorImage(name, streams.Out, streams.ErrOut), }, }, diff --git a/pkg/oc/cli/cmd/image/mirror/manifest.go b/pkg/oc/cli/cmd/image/mirror/manifest.go index ac775c73f583..081024d0005c 100644 --- a/pkg/oc/cli/cmd/image/mirror/manifest.go +++ b/pkg/oc/cli/cmd/image/mirror/manifest.go @@ -17,10 +17,10 @@ import ( "github.com/golang/glog" digest "github.com/opencontainers/go-digest" - imageapi "github.com/openshift/origin/pkg/image/apis/image" + imagereference "github.com/openshift/origin/pkg/image/apis/image/reference" ) -func processManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests distribution.ManifestService, ref imageapi.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor, bool) bool) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) { +func processManifestList(ctx context.Context, srcDigest digest.Digest, srcManifest distribution.Manifest, manifests 
distribution.ManifestService, ref imagereference.DockerImageReference, filterFn func(*manifestlist.ManifestDescriptor, bool) bool) ([]distribution.Manifest, distribution.Manifest, digest.Digest, error) { var srcManifests []distribution.Manifest switch t := srcManifest.(type) { case *manifestlist.DeserializedManifestList: diff --git a/pkg/oc/cli/cmd/image/mirror/mappings.go b/pkg/oc/cli/cmd/image/mirror/mappings.go index 026150b70f66..0df9e2bcd3b6 100644 --- a/pkg/oc/cli/cmd/image/mirror/mappings.go +++ b/pkg/oc/cli/cmd/image/mirror/mappings.go @@ -10,20 +10,20 @@ import ( "github.com/docker/distribution/registry/client/auth" digest "github.com/opencontainers/go-digest" - imageapi "github.com/openshift/origin/pkg/image/apis/image" + "github.com/openshift/origin/pkg/image/apis/image/reference" ) // ErrAlreadyExists may be returned by the blob Create function to indicate that the blob already exists. var ErrAlreadyExists = fmt.Errorf("blob already exists in the target location") type Mapping struct { - Source imageapi.DockerImageReference - Destination imageapi.DockerImageReference + Source reference.DockerImageReference + Destination reference.DockerImageReference Type DestinationType } -func parseSource(ref string) (imageapi.DockerImageReference, error) { - src, err := imageapi.ParseDockerImageReference(ref) +func parseSource(ref string) (reference.DockerImageReference, error) { + src, err := reference.Parse(ref) if err != nil { return src, fmt.Errorf("%q is not a valid image reference: %v", ref, err) } @@ -33,14 +33,14 @@ func parseSource(ref string) (imageapi.DockerImageReference, error) { return src, nil } -func parseDestination(ref string) (imageapi.DockerImageReference, DestinationType, error) { +func parseDestination(ref string) (reference.DockerImageReference, DestinationType, error) { dstType := DestinationRegistry switch { case strings.HasPrefix(ref, "s3://"): dstType = DestinationS3 ref = strings.TrimPrefix(ref, "s3://") } - dst, err := 
imageapi.ParseDockerImageReference(ref) + dst, err := reference.Parse(ref) if err != nil { return dst, dstType, fmt.Errorf("%q is not a valid image reference: %v", ref, err) } @@ -153,14 +153,14 @@ var ( type destination struct { t DestinationType - ref imageapi.DockerImageReference + ref reference.DockerImageReference tags []string } type pushTargets map[key]destination type destinations struct { - ref imageapi.DockerImageReference + ref reference.DockerImageReference lock sync.Mutex tags map[string]pushTargets diff --git a/pkg/oc/cli/cmd/image/mirror/mirror.go b/pkg/oc/cli/cmd/image/mirror/mirror.go index 1282e3eb8e58..80508e3254cc 100644 --- a/pkg/oc/cli/cmd/image/mirror/mirror.go +++ b/pkg/oc/cli/cmd/image/mirror/mirror.go @@ -23,7 +23,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - imageapi "github.com/openshift/origin/pkg/image/apis/image" + imagereference "github.com/openshift/origin/pkg/image/apis/image/reference" "github.com/openshift/origin/pkg/image/registryclient" "github.com/openshift/origin/pkg/image/registryclient/dockercredentials" ) @@ -171,7 +171,7 @@ func (o *pushOptions) Complete(args []string) error { return nil } -func (o *pushOptions) Repository(ctx context.Context, context *registryclient.Context, t DestinationType, ref imageapi.DockerImageReference) (distribution.Repository, error) { +func (o *pushOptions) Repository(ctx context.Context, context *registryclient.Context, t DestinationType, ref imagereference.DockerImageReference) (distribution.Repository, error) { switch t { case DestinationRegistry: return context.Repository(ctx, ref.DockerClientDefaults().RegistryURL(), ref.RepositoryName(), o.Insecure) diff --git a/pkg/oc/cli/cmd/image/mirror/plan.go b/pkg/oc/cli/cmd/image/mirror/plan.go index 89bfb5f5f9da..050c51198b39 100644 --- a/pkg/oc/cli/cmd/image/mirror/plan.go +++ b/pkg/oc/cli/cmd/image/mirror/plan.go @@ -11,12 +11,13 @@ import ( units "github.com/docker/go-units" 
godigest "github.com/opencontainers/go-digest" - imageapi "github.com/openshift/origin/pkg/image/apis/image" "k8s.io/apimachinery/pkg/util/sets" + + "github.com/openshift/origin/pkg/image/apis/image/reference" ) type retrieverError struct { - src, dst imageapi.DockerImageReference + src, dst reference.DockerImageReference err error } @@ -399,7 +400,7 @@ func (p *repositoryPlan) AddError(errs ...error) { p.errs = append(p.errs, errs...) } -func (p *repositoryPlan) Blobs(from imageapi.DockerImageReference, t DestinationType, location string) *repositoryBlobCopy { +func (p *repositoryPlan) Blobs(from reference.DockerImageReference, t DestinationType, location string) *repositoryBlobCopy { p.lock.Lock() defer p.lock.Unlock() @@ -412,7 +413,7 @@ func (p *repositoryPlan) Blobs(from imageapi.DockerImageReference, t Destination parent: p, fromRef: from, - toRef: imageapi.DockerImageReference{Registry: p.parent.name, Name: p.name}, + toRef: reference.DockerImageReference{Registry: p.parent.name, Name: p.name}, destinationType: t, location: location, @@ -436,7 +437,7 @@ func (p *repositoryPlan) Manifests(destinationType DestinationType) *repositoryM if p.manifests == nil { p.manifests = &repositoryManifestPlan{ parent: p, - toRef: imageapi.DockerImageReference{Registry: p.parent.name, Name: p.name}, + toRef: reference.DockerImageReference{Registry: p.parent.name, Name: p.name}, destinationType: destinationType, digestsToTags: make(map[godigest.Digest]sets.String), digestCopies: sets.NewString(), @@ -494,8 +495,8 @@ func (p *repositoryPlan) calculateStats(registryCounts map[string]int) { type repositoryBlobCopy struct { parent *repositoryPlan - fromRef imageapi.DockerImageReference - toRef imageapi.DockerImageReference + fromRef reference.DockerImageReference + toRef reference.DockerImageReference destinationType DestinationType location string @@ -551,7 +552,7 @@ func (p *repositoryBlobCopy) calculateStats() { type repositoryManifestPlan struct { parent *repositoryPlan - 
toRef imageapi.DockerImageReference + toRef reference.DockerImageReference destinationType DestinationType lock sync.Mutex diff --git a/test/extended/images/append.go b/test/extended/images/append.go new file mode 100644 index 000000000000..f4773f7ba5b8 --- /dev/null +++ b/test/extended/images/append.go @@ -0,0 +1,141 @@ +package images + +import ( + "fmt" + "strings" + + "github.com/MakeNowJust/heredoc" + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + + kapiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/origin/pkg/image/dockerlayer" + exutil "github.com/openshift/origin/test/extended/util" +) + +func cliPodWithPullSecret(cli *exutil.CLI, shell string) *kapiv1.Pod { + err := exutil.WaitForServiceAccount(cli.KubeClient().Core().ServiceAccounts(cli.Namespace()), "builder") + o.Expect(err).NotTo(o.HaveOccurred()) + sa, err := cli.KubeClient().Core().ServiceAccounts(cli.Namespace()).Get("builder", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(sa.ImagePullSecrets).NotTo(o.BeEmpty()) + pullSecretName := sa.ImagePullSecrets[0].Name + + // best effort to get the format string for the release + router, err := cli.AdminAppsClient().Apps().DeploymentConfigs("default").Get("router", metav1.GetOptions{}) + if err != nil { + g.Fail(fmt.Sprintf("Unable to find router in order to query format string: %v", err)) + } + cliImage := strings.Replace(router.Spec.Template.Spec.Containers[0].Image, "haproxy-router", "cli", 1) + + return &kapiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "append-test", + }, + Spec: kapiv1.PodSpec{ + // so we have permission to push and pull to the registry + ServiceAccountName: "builder", + RestartPolicy: kapiv1.RestartPolicyNever, + Containers: []kapiv1.Container{ + { + Name: "test", + Image: cliImage, + Command: []string{"/bin/bash", "-c", "set -euo pipefail; " + shell}, + Env: []kapiv1.EnvVar{ + { + Name: "HOME", + Value: "/secret", + }, + }, + VolumeMounts: 
[]kapiv1.VolumeMount{ + { + Name: "pull-secret", + MountPath: "/secret/.dockercfg", + SubPath: kapiv1.DockerConfigKey, + }, + }, + }, + }, + Volumes: []kapiv1.Volume{ + { + Name: "pull-secret", + VolumeSource: kapiv1.VolumeSource{ + Secret: &kapiv1.SecretVolumeSource{ + SecretName: pullSecretName, + }, + }, + }, + }, + }, + } +} + +var _ = g.Describe("[Feature:ImageAppend] Image append", func() { + defer g.GinkgoRecover() + + var oc *exutil.CLI + var ns string + + g.AfterEach(func() { + if g.CurrentGinkgoTestDescription().Failed && len(ns) > 0 { + exutil.DumpPodLogsStartingWithInNamespace("", ns, oc) + } + }) + + oc = exutil.NewCLI("image-append", exutil.KubeConfigPath()) + + g.It("should create images by appending them", func() { + ns = oc.Namespace() + cli := oc.KubeFramework().PodClient() + pod := cli.Create(cliPodWithPullSecret(oc, heredoc.Docf(` + set -x + + # create a scratch image with fixed date + oc image append --insecure --to docker-registry.default.svc:5000/%[1]s/test:scratch1 --image='{"Cmd":["/bin/sleep"]}' --created-at=0 + + # create a second scratch image with fixed date + oc image append --insecure --to docker-registry.default.svc:5000/%[1]s/test:scratch2 --image='{"Cmd":["/bin/sleep"]}' --created-at=0 + + # modify a busybox image + oc image append --insecure --from=docker.io/library/busybox:latest --to docker-registry.default.svc:5000/%[1]s/test:busybox1 --image='{"Cmd":["/bin/sleep"]}' + + # add a simple layer to the image + mkdir -p /tmp/test/dir + touch /tmp/test/1 + touch /tmp/test/dir/2 + tar cvzf /tmp/layer.tar.gz -C /tmp/test/ . 
+ oc image append --insecure --from=docker-registry.default.svc:5000/%[1]s/test:busybox1 --to docker-registry.default.svc:5000/%[1]s/test:busybox2 /tmp/layer.tar.gz + `, ns))) + cli.WaitForSuccess(pod.Name, podStartupTimeout) + + istag, err := oc.ImageClient().Image().ImageStreamTags(ns).Get("test:scratch1", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(istag.Image).NotTo(o.BeNil()) + o.Expect(istag.Image.DockerImageLayers).To(o.HaveLen(1)) + o.Expect(istag.Image.DockerImageLayers[0].Name).To(o.Equal(dockerlayer.GzippedEmptyLayerDigest)) + o.Expect(istag.Image.DockerImageMetadata.Config.Cmd).To(o.Equal([]string{"/bin/sleep"})) + + istag2, err := oc.ImageClient().Image().ImageStreamTags(ns).Get("test:scratch2", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(istag2.Image).NotTo(o.BeNil()) + o.Expect(istag2.Image.Name).To(o.Equal(istag.Image.Name)) + + istag, err = oc.ImageClient().Image().ImageStreamTags(ns).Get("test:busybox1", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(istag.Image).NotTo(o.BeNil()) + o.Expect(istag.Image.DockerImageLayers).To(o.HaveLen(1)) + o.Expect(istag.Image.DockerImageLayers[0].Name).NotTo(o.Equal(dockerlayer.GzippedEmptyLayerDigest)) + o.Expect(istag.Image.DockerImageMetadata.Config.Cmd).To(o.Equal([]string{"/bin/sleep"})) + busyboxLayer := istag.Image.DockerImageLayers[0].Name + + istag, err = oc.ImageClient().Image().ImageStreamTags(ns).Get("test:busybox2", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(istag.Image).NotTo(o.BeNil()) + o.Expect(istag.Image.DockerImageLayers).To(o.HaveLen(2)) + o.Expect(istag.Image.DockerImageLayers[0].Name).To(o.Equal(busyboxLayer)) + o.Expect(istag.Image.DockerImageLayers[1].LayerSize).NotTo(o.Equal(0)) + o.Expect(istag.Image.DockerImageMetadata.Config.Cmd).To(o.Equal([]string{"/bin/sleep"})) + }) +})