diff --git a/glide.lock b/glide.lock
index 201a17753f4f..2c5c10e21a64 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,5 +1,5 @@
-hash: 0f36ad5a96d6f9627f03e21e71a9c69e36e7adc0719c5728abfa9ecc4e9ab644
-updated: 2017-12-08T09:07:15.649059681-05:00
+hash: 9b485ae9459521abe626017a55313b4308ac73cd0321a14bb49d25e816aaebd4
+updated: 2017-12-14T15:33:35.768942902-05:00
 imports:
 - name: bitbucket.org/ww/goautoneg
   version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675
@@ -178,13 +178,9 @@ imports:
   - types
   - version
 - name: github.com/containers/storage
-  version: 46ef35348492d492e4671938a1993a315e4ad30f
+  version: 47536c89fcc545a87745e1a1573addc439409165
   subpackages:
-  - pkg/fileutils
   - pkg/homedir
-  - pkg/idtools
-  - pkg/mount
-  - pkg/system
 - name: github.com/coreos/etcd
   version: e211fb6de3cb306aee245422e599ac521f6e21f7
   subpackages:
@@ -705,8 +701,6 @@ imports:
   version: 2788f0dbd16903de03cb8186e5c7d97b69ad387b
 - name: github.com/kr/pty
   version: f7ee69f31298ecbe5d2b349c711e2547a617d398
-- name: github.com/kubernetes-incubator/cri-o
-  version: a8ee86b1cce0c13bd541a99140682a92635ba9f7
 - name: github.com/lestrrat/go-jspointer
   version: f4881e611bdbe9fb413a7780721ef8400a1f2341
 - name: github.com/lestrrat/go-jsref
@@ -847,7 +841,7 @@ imports:
   - go-selinux
   - go-selinux/label
 - name: github.com/openshift/api
-  version: 1ece25cd3e05d47db70fab18c2cf9a943741b912
+  version: 420fe685a7b5a26ce1ca907081270e7218bd7920
   subpackages:
   - apps/v1
   - authorization/v1
@@ -1254,7 +1248,7 @@ imports:
 - name: gopkg.in/yaml.v2
   version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
 - name: k8s.io/api
-  version: abe47f56cf980731ebaeddba938f4fcc21527e2b
+  version: 54d98ad4baf0e046bc3b6f7d159ca009de7843c0
   repo: git@github.com:openshift/kubernetes-api
   subpackages:
   - admission/v1beta1
@@ -1288,7 +1282,7 @@ imports:
   - storage/v1alpha1
   - storage/v1beta1
 - name: k8s.io/apiextensions-apiserver
-  version: d10b248182df206df1bc1de8bc7f1a16cdad2f1c
+  version: d1af2d75c27f2415e054c9e87f4e4f461e9b8459
   repo: git@github.com:openshift/kubernetes-apiextensions-apiserver
   subpackages:
   - pkg/apis/apiextensions
@@ -1320,7 +1314,7 @@ imports:
   - pkg/registry/customresource
   - pkg/registry/customresourcedefinition
 - name: k8s.io/apimachinery
-  version: 0ede59012bac185794d667987ff32eecd524390d
+  version: 9666710a63d6c2411d9544d5f9d31fa4f9043536
   repo: git@github.com:openshift/kubernetes-apimachinery
   subpackages:
   - pkg/api/equality
@@ -1386,7 +1380,7 @@ imports:
   - third_party/forked/golang/netutil
   - third_party/forked/golang/reflect
 - name: k8s.io/apiserver
-  version: 7b3c0f9c1c61eff7c227a729f6af7b1d8cd1108c
+  version: 2b30e13a0d6652b75408c85e078702842a0128a2
   repo: git@github.com:openshift/kubernetes-apiserver
   subpackages:
   - pkg/admission
@@ -1446,7 +1440,9 @@ imports:
   - pkg/registry/generic
   - pkg/registry/generic/registry
   - pkg/registry/generic/rest
+  - pkg/registry/generic/testing
   - pkg/registry/rest
+  - pkg/registry/rest/resttest
   - pkg/server
   - pkg/server/filters
   - pkg/server/healthz
@@ -1470,6 +1466,7 @@ imports:
   - pkg/storage/names
   - pkg/storage/storagebackend
   - pkg/storage/storagebackend/factory
+  - pkg/storage/testing
   - pkg/storage/value
   - pkg/storage/value/encrypt/aes
   - pkg/storage/value/encrypt/envelope
@@ -1492,7 +1489,7 @@ imports:
   - plugin/pkg/authenticator/token/webhook
   - plugin/pkg/authorizer/webhook
 - name: k8s.io/client-go
-  version: cc4f56d30cb3feb36ca21b873a7bbffafe53deb3
+  version: 0752038789137ee7795faa62a088d76b1372abf9
   repo: git@github.com:openshift/kubernetes-client-go
   subpackages:
   - discovery
@@ -1670,7 +1667,7 @@ imports:
   - util/testing
   - util/workqueue
 - name: k8s.io/code-generator
-  version: 004ef858db39afcedf99c8cb82f6666865f9bea9
+  version: 0ea404e72920cc2978c0bfcc05a37528e01e5bcf
   repo: git@github.com:openshift/kubernetes-code-generator
   subpackages:
   - cmd/conversion-gen/generators
@@ -1691,7 +1688,7 @@ imports:
   subpackages:
   - metrics/api/v1/types
 - name: k8s.io/kube-aggregator
-  version: 2f38004cf61487c376ee575ddcabfefaa4732bcd
+  version: 82dbda776181ed57aa38c5434c905251bf6fdab1
   repo: git@github.com:openshift/kube-aggregator
   subpackages:
   - pkg/apis/apiregistration
@@ -1728,7 +1725,7 @@ imports:
   - pkg/util/proto
   - pkg/util/proto/validation
 - name: k8s.io/kubernetes
-  version: 3258431ac4f8e0f1425711b140de793ce20702e3
+  version: 350c920997b060eb37f74c870b7b320619cbcfa7
   repo: git@github.com:openshift/kubernetes
   subpackages:
   - cmd/genutils
@@ -1740,7 +1737,6 @@ imports:
   - cmd/kubeadm/app/apis/kubeadm/fuzzer
   - cmd/kubelet/app
   - cmd/kubelet/app/options
-  - pkg/api
   - pkg/api/endpoints
   - pkg/api/events
   - pkg/api/legacyscheme
@@ -2479,7 +2475,7 @@ imports:
   - third_party/forked/gonum/graph/simple
   - third_party/forked/gonum/graph/traverse
 - name: k8s.io/metrics
-  version: fa5226d7bbc5983151218570874fb1cb59a66fe0
+  version: 316e6926aa457ce7549a24119a9c0098cae844c1
   repo: git@github.com:openshift/kubernetes-metrics
   subpackages:
   - pkg/apis/custom_metrics
diff --git a/glide.yaml b/glide.yaml
index 899e933c6754..4ef7fd92ee47 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -67,14 +67,17 @@ import:
   version: release-3.9
 - package: github.com/openshift/client-go
   version: release-3.9
-# only used by pkg/build/builder
-- package: github.com/kubernetes-incubator/cri-o
-  version: a8ee86b1cce0c13bd541a99140682a92635ba9f7
 - package: github.com/containers/image
   repo: git@github.com:openshift/containers-image
   version: openshift-3.8
+# yours: containers/image uses it. No idea why this isn't self-pinning.
+- package: github.com/containers/storage
+  version: 47536c89fcc545a87745e1a1573addc439409165
 - package: github.com/vjeantet/ldapserver
   version: v1.0
+# ours: containers/storage uses it, but oc/bootstrap does too. Pinning to the last level.
+- package: github.com/docker/engine-api
+  version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd
 # ours because of genapidocs. This is the current kube level
 - package: github.com/go-openapi/loads
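The two comments added to glide.yaml above pin transitive dependencies by hand: containers/storage comes in via containers/image, and docker/engine-api is used by both containers/storage and oc/bootstrap, so neither pin can be left to glide's resolver. A quick way to catch the drift those comments worry about is to cross-check the explicit pins in glide.yaml against the resolved revisions in glide.lock. The sketch below is a hypothetical helper, not part of this PR; it assumes only the gopkg.in/yaml.v2 package already present in the lock.

```go
// verify-pins: report glide.yaml revision pins that disagree with glide.lock.
// Hypothetical helper, not part of this change.
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	yaml "gopkg.in/yaml.v2"
)

type glideYAML struct {
	Imports []struct {
		Package string `yaml:"package"`
		Version string `yaml:"version"`
	} `yaml:"import"`
}

type glideLock struct {
	Imports []struct {
		Name    string `yaml:"name"`
		Version string `yaml:"version"`
	} `yaml:"imports"`
}

func read(path string, into interface{}) {
	data, err := ioutil.ReadFile(path)
	if err == nil {
		err = yaml.Unmarshal(data, into)
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

func main() {
	var y glideYAML
	var l glideLock
	read("glide.yaml", &y)
	read("glide.lock", &l)

	locked := map[string]string{}
	for _, imp := range l.Imports {
		locked[imp.Name] = imp.Version
	}
	for _, imp := range y.Imports {
		// Branch pins such as "release-3.9" resolve to SHAs in the lock,
		// so only compare entries pinned to a full 40-character revision.
		if len(imp.Version) == 40 && locked[imp.Package] != imp.Version {
			fmt.Printf("%s: glide.yaml pins %s but glide.lock has %s\n",
				imp.Package, imp.Version, locked[imp.Package])
		}
	}
}
```

Run from the repository root; anything it prints means glide resolved a pinned package to a revision other than the one glide.yaml asks for.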
diff --git a/pkg/build/builder/Godeps/Godeps.json b/pkg/build/builder/Godeps/Godeps.json
index a427e965053a..69cd96be8ee6 100644
--- a/pkg/build/builder/Godeps/Godeps.json
+++ b/pkg/build/builder/Godeps/Godeps.json
@@ -768,6 +768,10 @@
 			"ImportPath": "github.com/openshift/source-to-image/pkg/util/user",
 			"Comment": "v1.1.7-63-ge3140d0",
 			"Rev": "e3140d019517368c7c3f72476f9cae7a8b1269d0"
+		},
+		{
+			"ImportPath": "github.com/kubernetes-incubator/cri-o",
+			"Rev": "a8ee86b1cce0c13bd541a99140682a92635ba9f7"
 		}
 	]
 }
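With the Godeps.json entry above in place, the cri-o sources move out of the top-level vendor tree and into pkg/build/builder/vendor (the rename run that follows), so the builder carries its own pinned copy instead of forcing the pin on the rest of the repository. Below is a minimal sketch of what consuming that vendored client looks like, assuming the client.New and CrioClient.DaemonInfo API that client/client.go exposes at the pinned revision a8ee86b; the socket path and printed fields are illustrative.

```go
// Minimal sketch, not code from this change: query the CRI-O daemon through
// the client vendored under pkg/build/builder/vendor.
package main

import (
	"fmt"
	"os"

	crioclient "github.com/kubernetes-incubator/cri-o/client"
)

func main() {
	// Assumed default CRI-O control socket; adjust for the host.
	cc, err := crioclient.New("/var/run/crio.sock")
	if err != nil {
		fmt.Fprintf(os.Stderr, "connect to crio: %v\n", err)
		os.Exit(1)
	}
	info, err := cc.DaemonInfo()
	if err != nil {
		fmt.Fprintf(os.Stderr, "daemon info: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("storage driver %q, storage root %q\n", info.StorageDriver, info.StorageRoot)
}
```

Because the copy lives under pkg/build/builder/vendor, only builds rooted at pkg/build/builder resolve this import; the main glide-managed vendor tree no longer needs the cri-o pin at all.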
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/.gitignore b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/.gitignore
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/.gitignore
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/.gitignore
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/.tool/lint b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/.tool/lint
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/.tool/lint
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/.tool/lint
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/.travis.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/.travis.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/.travis.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/.travis.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/Dockerfile b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/Dockerfile
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/Dockerfile
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/Dockerfile
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/KPOD_VERSION b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/KPOD_VERSION
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/KPOD_VERSION
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/KPOD_VERSION
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/LICENSE b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/LICENSE
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/LICENSE
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/LICENSE
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/Makefile b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/Makefile
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/Makefile
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/Makefile
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/OWNERS b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/OWNERS
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/OWNERS
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/OWNERS
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/README.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/README.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/README.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/README.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/VERSION b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/VERSION
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/VERSION
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/VERSION
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/client/client.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/client/client.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/client/client.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/client/client.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/config.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/config.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/config.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/config.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/daemon_linux.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/daemon_linux.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/daemon_linux.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/daemon_linux.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/main.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/main.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/main.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crio/main.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/container.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/container.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/container.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/container.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/image.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/image.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/image.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/image.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/info.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/info.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/info.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/info.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/main.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/main.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/main.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/main.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/sandbox.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/sandbox.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/sandbox.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/sandbox.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/system.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/system.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/system.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/crioctl/system.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/README.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/README.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/README.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/README.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common_test.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common_test.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common_test.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/common_test.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/diff.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/diff.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/diff.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/diff.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/docker/types.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/docker/types.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/docker/types.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/docker/types.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/export.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/export.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/export.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/export.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/formats.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/formats.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/formats.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/formats.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/templates.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/templates.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/templates.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/formats/templates.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/history.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/history.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/history.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/history.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/images.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/images.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/images.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/images.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/info.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/info.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/info.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/info.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/inspect.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/inspect.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/inspect.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/inspect.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/load.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/load.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/load.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/load.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/logs.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/logs.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/logs.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/logs.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/main.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/main.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/main.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/main.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/mount.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/mount.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/mount.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/mount.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/ps.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/ps.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/ps.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/ps.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/pull.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/pull.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/pull.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/pull.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/push.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/push.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/push.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/push.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rename.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rename.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rename.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rename.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rm.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rm.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rm.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rm.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rmi.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rmi.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rmi.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/rmi.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/save.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/save.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/save.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/save.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stats.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stats.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stats.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stats.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stop.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stop.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stop.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/stop.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/tag.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/tag.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/tag.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/tag.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/umount.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/umount.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/umount.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/umount.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/version.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/version.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/version.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/cmd/kpod/version.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/code-of-conduct.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/code-of-conduct.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/code-of-conduct.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/code-of-conduct.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/completions/bash/kpod b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/completions/bash/kpod
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/completions/bash/kpod
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/completions/bash/kpod
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/conmon/Makefile b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/Makefile
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/conmon/Makefile
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/Makefile
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.c b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.c
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.c
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.c
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.h b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.h
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.h
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/cmsg.h
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/conmon/conmon.c b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/conmon.c
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/conmon/conmon.c
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/conmon/conmon.c
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/10-crio-bridge.conf b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/10-crio-bridge.conf
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/10-crio-bridge.conf
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/10-crio-bridge.conf
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/99-loopback.conf b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/99-loopback.conf
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/99-loopback.conf
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/99-loopback.conf
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/README.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/README.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/README.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/cni/README.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/Makefile b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/Makefile
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/Makefile
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/Makefile
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/crio.spec b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/crio.spec
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/crio.spec
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/rpm/crio.spec
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio-shutdown.service b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio-shutdown.service
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio-shutdown.service
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio-shutdown.service
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio.service b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio.service
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio.service
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/systemd/crio.service
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/bats.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/bats.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/bats.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/bats.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-o.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-o.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-o.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-o.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-tools.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-tools.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-tools.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/cri-tools.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/kubernetes.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/kubernetes.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/kubernetes.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/kubernetes.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/plugins.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/plugins.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/plugins.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/plugins.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/runc.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/runc.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/runc.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/build/runc.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/e2e.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/e2e.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/e2e.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/e2e.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/golang.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/golang.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/golang.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/golang.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/main.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/main.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/main.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/main.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/system.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/system.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/system.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/system.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/test.yml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/test.yml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/test.yml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/integration/test.yml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/requirements.txt b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/requirements.txt
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/requirements.txt
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/requirements.txt
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/venv-ansible-playbook.sh b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/venv-ansible-playbook.sh
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/contrib/test/venv-ansible-playbook.sh
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/contrib/test/venv-ansible-playbook.sh
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/crio.8.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/crio.8.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/crio.8.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/crio.8.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/crio.conf.5.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/crio.conf.5.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/crio.conf.5.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/crio.conf.5.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-cp.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-cp.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-cp.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-cp.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-diff.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-diff.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-diff.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-diff.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-export.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-export.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-export.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-export.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-history.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-history.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-history.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-history.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-images.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-images.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-images.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-images.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-info.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-info.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-info.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-info.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-inspect.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-inspect.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-inspect.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-inspect.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-load.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-load.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-load.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-load.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-logs.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-logs.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-logs.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-logs.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-mount.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-mount.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-mount.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-mount.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-ps.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-ps.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-ps.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-ps.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-pull.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-pull.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-pull.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-pull.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-push.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-push.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-push.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-push.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rename.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rename.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rename.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rename.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rm.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rm.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rm.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rm.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rmi.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rmi.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rmi.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-rmi.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-save.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-save.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-save.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-save.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stats.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stats.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stats.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stats.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stop.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stop.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stop.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-stop.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-tag.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-tag.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-tag.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-tag.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-umount.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-umount.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-umount.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-umount.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-version.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-version.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-version.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod-version.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod.1.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod.1.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/kpod.1.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/kpod.1.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/docs/play.png b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/play.png
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/docs/play.png
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/docs/play.png
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/hack/btrfs_tag.sh b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/btrfs_tag.sh
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/hack/btrfs_tag.sh
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/btrfs_tag.sh
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/hack/find-godeps.sh b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/find-godeps.sh
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/hack/find-godeps.sh
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/find-godeps.sh
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/hack/libdm_tag.sh b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/libdm_tag.sh
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/hack/libdm_tag.sh
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/libdm_tag.sh
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/hack/verify-gofmt.sh b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/verify-gofmt.sh
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/hack/verify-gofmt.sh
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hack/verify-gofmt.sh
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/hooks.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hooks.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/hooks.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/hooks.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/kpod-images.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/kpod-images.json
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/kpod-images.json
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/kpod-images.json
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/kubernetes.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/kubernetes.md
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/kubernetes.md
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/kubernetes.md
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/config.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/config.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/config.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/config.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/config_test.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/config_test.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/config_test.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/config_test.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/container.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_data.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_data.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_data.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_data.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_server.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_server.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_server.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/container_server.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/diff.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/diff.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/diff.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/diff.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/hooks.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/hooks.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/hooks.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/hooks.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/logs.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/logs.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/logs.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/logs.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/remove.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/remove.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/remove.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/remove.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/rename.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/rename.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/rename.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/rename.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/sandbox/sandbox.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/sandbox/sandbox.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/sandbox/sandbox.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/sandbox/sandbox.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/stats.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/stats.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/stats.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/stats.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/stop.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/stop.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/stop.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/stop.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libkpod/testdata/config.toml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/testdata/config.toml
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libkpod/testdata/config.toml
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libkpod/testdata/config.toml
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/common.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/common.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/common/common.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/common.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/docker_registry_options.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/docker_registry_options.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/common/docker_registry_options.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/docker_registry_options.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/output_interfaces.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/output_interfaces.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/common/output_interfaces.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/output_interfaces.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/signing_options.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/signing_options.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/common/signing_options.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/common/signing_options.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/ctr/container.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/ctr/container.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/ctr/container.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/ctr/container.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/driver/driver.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/driver/driver.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/driver/driver.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/driver/driver.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/image.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/image.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/image.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/image.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_data.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_data.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_data.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_data.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_ref.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_ref.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_ref.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/copy_ref.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image_data.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image_data.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image_data.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/image_data.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/rmi.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/rmi.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/images/rmi.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/images/rmi.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/layers/layer.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/layers/layer.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/layers/layer.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/layers/layer.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/options.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/options.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/options.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/options.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/pod/pod.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/pod/pod.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/pod/pod.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/pod/pod.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/libpod/runtime.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/runtime.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/libpod/runtime.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/libpod/runtime.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo-source.svg b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo-source.svg
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo-source.svg
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo-source.svg
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.png b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.png
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.png
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.png
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.svg b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.svg
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.svg
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/logo/crio-logo.svg
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/oci/container.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/container.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/oci/container.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/container.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/oci/finished.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/finished.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/oci/finished.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/finished.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/oci/finished_32.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/finished_32.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/oci/finished_32.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/finished_32.go
diff --git a/vendor/github.com/kubernetes-incubator/cri-o/oci/history.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/history.go
similarity index 100%
rename from vendor/github.com/kubernetes-incubator/cri-o/oci/history.go
rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/history.go
vendor/github.com/kubernetes-incubator/cri-o/oci/history.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/history.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/oci/memory_store.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/memory_store.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/oci/memory_store.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/memory_store.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/oci/oci.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/oci.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/oci/oci.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/oci.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/oci/store.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/store.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/oci/store.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/oci/store.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pause/.gitignore b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pause/.gitignore similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pause/.gitignore rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pause/.gitignore diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pause/Makefile b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pause/Makefile similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pause/Makefile rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pause/Makefile diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pause/pause.c b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pause/pause.c similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pause/pause.c rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pause/pause.c diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pkg/annotations/annotations.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/annotations/annotations.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pkg/annotations/annotations.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/annotations/annotations.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar_test.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar_test.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar_test.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/registrar/registrar_test.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/doc.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/doc.go similarity index 100% rename from 
vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/doc.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/doc.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image_regexp.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image_regexp.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image_regexp.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/image_regexp.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/runtime.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/runtime.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/runtime.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/pkg/storage/runtime.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/seccomp.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/seccomp.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/seccomp.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/seccomp.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/aaparser.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/aaparser.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/aaparser.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/aaparser.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_common.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_common.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_common.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_common.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_supported.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_supported.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_supported.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_supported.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_unsupported.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_unsupported.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_unsupported.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/apparmor_unsupported.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/template.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/template.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/template.go rename 
to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/apparmor/template.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/config.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/config.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/config.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/config.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_attach.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_attach.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_attach.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_attach.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_create.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_create.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_create.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_create.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_exec.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_exec.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_exec.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_exec.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_execsync.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_execsync.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_execsync.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_execsync.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_list.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_list.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_list.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_list.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_portforward.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_portforward.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_portforward.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_portforward.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_remove.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_remove.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_remove.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_remove.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_start.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_start.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_start.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_start.go diff --git 
a/vendor/github.com/kubernetes-incubator/cri-o/server/container_stats.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_stats.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_stats.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_stats.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_stats_list.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_stats_list.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_stats_list.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_stats_list.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_status.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_status.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_status.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_status.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_stop.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_stop.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_stop.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_stop.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/container_updateruntimeconfig.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_updateruntimeconfig.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/container_updateruntimeconfig.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/container_updateruntimeconfig.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/image_fs_info.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_fs_info.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/image_fs_info.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_fs_info.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/image_list.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_list.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/image_list.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_list.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/image_pull.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_pull.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/image_pull.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_pull.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/image_remove.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_remove.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/image_remove.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_remove.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/image_status.go 
b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_status.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/image_status.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/image_status.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/inspect.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/inspect.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/inspect.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/inspect.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/inspect_test.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/inspect_test.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/inspect_test.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/inspect_test.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/naming.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/naming.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/naming.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/naming.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/runtime_status.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/runtime_status.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/runtime_status.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/runtime_status.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_list.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_list.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_list.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_list.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_network.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_network.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_network.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_network.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_remove.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_remove.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_remove.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_remove.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_run.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_run.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_run.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_run.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_status.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_status.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_status.go rename to 
pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_status.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_stop.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_stop.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_stop.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/sandbox_stop.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp_unsupported.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp_unsupported.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp_unsupported.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/seccomp_unsupported.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/types.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/types.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/types.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/seccomp/types.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/server.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/server.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/server.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/server.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/utils.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/utils.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/utils.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/utils.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/server/version.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/version.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/server/version.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/server/version.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/README.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/README.md similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/README.md rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/README.md diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/apparmor.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/apparmor.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/apparmor.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/apparmor.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/bin2img/bin2img.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/bin2img/bin2img.go similarity index 100% rename from 
vendor/github.com/kubernetes-incubator/cri-o/test/bin2img/bin2img.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/bin2img/bin2img.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/cgroups.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/cgroups.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/cgroups.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/cgroups.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/checkseccomp/checkseccomp.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/checkseccomp/checkseccomp.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/checkseccomp/checkseccomp.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/checkseccomp/checkseccomp.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/copyimg/copyimg.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/copyimg/copyimg.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/copyimg/copyimg.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/copyimg/copyimg.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/ctr.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/ctr.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/ctr.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/ctr.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/helpers.bash b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/helpers.bash similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/helpers.bash rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/helpers.bash diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/hooks.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/hooks.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/hooks.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/hooks.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.sh b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.sh similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.sh rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/hooks/checkhook.sh diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/image.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/image.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/image.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/image.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/image_volume.bats 
b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/image_volume.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/image_volume.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/image_volume.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/inspect.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/inspect.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/inspect.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/inspect.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_diff.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_diff.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_diff.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_diff.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_export.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_export.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_export.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_export.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_load.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_load.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_load.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_load.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_logs.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_logs.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_logs.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_logs.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_mount.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_mount.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_mount.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_mount.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_ps.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_ps.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_ps.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_ps.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_pull.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_pull.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_pull.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_pull.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rename.bats 
b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rename.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rename.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rename.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rm.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rm.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rm.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_rm.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_save.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_save.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_save.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_save.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stats.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stats.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stats.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stats.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stop.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stop.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stop.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/kpod_stop.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/network.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/network.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/network.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/network.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/pod.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/pod.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/pod.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/pod.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/policy.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/policy.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/policy.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/policy.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/redhat_sigstore.yaml b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/redhat_sigstore.yaml similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/redhat_sigstore.yaml rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/redhat_sigstore.yaml diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/registries.conf b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/registries.conf similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/registries.conf rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/registries.conf diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/restore.bats 
b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/restore.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/restore.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/restore.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/runtimeversion.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/runtimeversion.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/runtimeversion.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/runtimeversion.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/seccomp.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/seccomp.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/seccomp.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/seccomp.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/selinux.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/selinux.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/selinux.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/selinux.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/tag.bats b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/tag.bats similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/tag.bats rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/tag.bats diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/test_runner.sh b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/test_runner.sh similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/test_runner.sh rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/test_runner.sh diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/README.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/README.md similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/README.md rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/README.md diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/apparmor_test_deny_write b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/apparmor_test_deny_write similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/apparmor_test_deny_write rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/apparmor_test_deny_write diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_by_imageid.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_by_imageid.json similarity index 100% rename from 
vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_by_imageid.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_by_imageid.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_hostport.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_hostport.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_hostport.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_hostport.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_logging.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_logging.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_logging.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_logging.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf_ro.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf_ro.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf_ro.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_resolvconf_ro.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_seccomp.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_seccomp.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_seccomp.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_config_seccomp.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_exit_test.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_exit_test.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_exit_test.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_exit_test.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis_device.json 
b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis_device.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis_device.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/container_redis_device.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/fake_ocid_default b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/fake_ocid_default similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/fake_ocid_default rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/fake_ocid_default diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostnet.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostnet.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostnet.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostnet.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostport.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostport.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostport.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_hostport.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_seccomp.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_seccomp.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_seccomp.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_seccomp.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_selinux.json b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_selinux.json similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_selinux.json rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/test/testdata/sandbox_config_selinux.json diff --git a/vendor/github.com/kubernetes-incubator/cri-o/transfer.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/transfer.md similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/transfer.md rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/transfer.md diff --git a/vendor/github.com/kubernetes-incubator/cri-o/tutorial.md b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/tutorial.md similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/tutorial.md rename to 
pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/tutorial.md diff --git a/vendor/github.com/kubernetes-incubator/cri-o/types/types.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/types/types.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/types/types.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/types/types.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/utils/utils.go b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/utils/utils.go similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/utils/utils.go rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/utils/utils.go diff --git a/vendor/github.com/kubernetes-incubator/cri-o/vendor.conf b/pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/vendor.conf similarity index 100% rename from vendor/github.com/kubernetes-incubator/cri-o/vendor.conf rename to pkg/build/builder/vendor/github.com/kubernetes-incubator/cri-o/vendor.conf diff --git a/vendor/github.com/containers/storage/.gitignore b/vendor/github.com/containers/storage/.gitignore index b424a96efa22..99899e9b5a22 100644 --- a/vendor/github.com/containers/storage/.gitignore +++ b/vendor/github.com/containers/storage/.gitignore @@ -1,9 +1,8 @@ -# containers/storage project generated files to ignore +# Docker project generated files to ignore # if you want to ignore files created by your editor/tools, # please consider a global .gitignore https://help.github.com/articles/ignoring-files -*.1 *.exe -*~ +*.exe~ *.orig *.test .*.swp @@ -25,8 +24,3 @@ man/man5 man/man8 vendor/pkg/ .vagrant -containers-storage -containers-storage.darwin.amd64 -containers-storage.linux.amd64 -containers-storage.linux.386 -containers-storage.linux.arm diff --git a/vendor/github.com/containers/storage/.travis.yml b/vendor/github.com/containers/storage/.travis.yml index fe01e42b5ac0..36a2a2977ae8 100644 --- a/vendor/github.com/containers/storage/.travis.yml +++ b/vendor/github.com/containers/storage/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.9 - - 1.8 + - tip - 1.7 + - 1.6 dist: trusty sudo: required before_install: diff --git a/vendor/github.com/containers/storage/CONTRIBUTING.md b/vendor/github.com/containers/storage/CONTRIBUTING.md deleted file mode 100644 index 5364be769edb..000000000000 --- a/vendor/github.com/containers/storage/CONTRIBUTING.md +++ /dev/null @@ -1,144 +0,0 @@ -# Contributing to Containers/Storage - -We'd love to have you join the community! Below summarizes the processes -that we follow. - -## Topics - -* [Reporting Issues](#reporting-issues) -* [Submitting Pull Requests](#submitting-pull-requests) -* [Communications](#communications) - - -## Reporting Issues - -Before reporting an issue, check our backlog of -[open issues](https://github.com/containers/storage/issues) -to see if someone else has already reported it. If so, feel free to add -your scenario, or additional information, to the discussion. Or simply -"subscribe" to it to be notified when it is updated. - -If you find a new issue with the project we'd love to hear about it! The most -important aspect of a bug report is that it includes enough information for -us to reproduce it. So, please include as much detail as possible and try -to remove the extra stuff that doesn't really relate to the issue itself. -The easier it is for us to reproduce it, the faster it'll be fixed! - -Please don't include any private/sensitive information in your issue! 
- -## Submitting Pull Requests - -No Pull Request (PR) is too small! Typos, additional comments in the code, -new testcases, bug fixes, new features, more documentation, ... it's all -welcome! - -While bug fixes can first be identified via an "issue", that is not required. -It's ok to just open up a PR with the fix, but make sure you include the same -information you would have included in an issue - like how to reproduce it. - -PRs for new features should include some background on what use cases the -new code is trying to address. When possible and when it makes sense, try to break-up -larger PRs into smaller ones - it's easier to review smaller -code changes. But only if those smaller ones make sense as stand-alone PRs. - -Regardless of the type of PR, all PRs should include: -* well documented code changes -* additional testcases. Ideally, they should fail w/o your code change applied -* documentation changes - -Squash your commits into logical pieces of work that might want to be reviewed -separate from the rest of the PRs. But, squashing down to just one commit is ok -too since in the end the entire PR will be reviewed anyway. When in doubt, -squash. - -PRs that fix issues should include a reference like `Closes #XXXX` in the -commit message so that github will automatically close the referenced issue -when the PR is merged. - - - -### Sign your PRs - -The sign-off is a line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. 
- -## Communications - -For general questions, or discussions, please use the -IRC group on `irc.freenode.net` called `container-projects` -that has been setup. - -For discussions around issues/bugs and features, you can use the github -[issues](https://github.com/containers/storage/issues) -and -[PRs](https://github.com/containers/storage/pulls) -tracking system. - - diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index a7a9086bd1b1..e09be3ede435 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -4,7 +4,7 @@ PACKAGE := github.com/containers/storage GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73 -NATIVETAGS := exclude_graphdriver_devicemapper exclude_graphdriver_btrfs exclude_graphdriver_overlay +NATIVETAGS := exclude_graphdriver_devicemapper exclude_graphdriver_btrfs AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) GO := go @@ -16,22 +16,9 @@ default all: local-binary docs local-validate local-cross local-gccgo test-unit clean: ## remove all built files $(RM) -f containers-storage containers-storage.* docs/*.1 -sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go pkg/*/*.go pkg/*/*/*.go) layers_ffjson.go images_ffjson.go containers_ffjson.go - -containers-storage: $(sources) ## build using gc on the host +binary local-binary: ## build using gc on the host $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage -layers_ffjson.go: layers.go - ffjson layers.go - -images_ffjson.go: images.go - ffjson images.go - -containers_ffjson.go: containers.go - ffjson containers.go - -binary local-binary: containers-storage - local-gccgo: ## build using gccgo on the host GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage @@ -62,7 +49,7 @@ test-unit: local-binary ## run the unit tests using VMs $(RUNINVM) make local-$@ local-test-integration: local-binary ## run the integration tests on the host (requires superuser privileges) - @cd tests; ./test_runner.bash + @cd tests; ./test_runner.sh test-integration: local-binary ## run the integration tests using VMs $(RUNINVM) make local-$@ diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/README.md b/vendor/github.com/containers/storage/cmd/containers-storage/README.md new file mode 100644 index 000000000000..4a54cd2d2b15 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/README.md @@ -0,0 +1,30 @@ +This is `containers-storage`, a command line tool for manipulating local +layer/image/container stores. + +It depends on `storage`, which is a pretty barebones wrapping of the graph +drivers that exposes the create/mount/unmount/delete operations and adds enough +bookkeeping to know about the relationships between layers. + +On top of that, `storage` provides a notion of a reference to a layer which is +paired with arbitrary user data (i.e., an `image`, that data being history, +configuration, and other metadata). It also provides a notion of a type of +layer, which is typically the child of an image's topmost layer, to which +arbitrary data is directly attached (i.e., a `container`, where the data is +typically configuration).
+ +Layers, images, and containers are each identified using IDs which can be set +when they are created (if not set, random values are generated), and can +optionally be assigned names which are resolved to IDs automatically by the +various APIs. + +The containers-storage tool is a CLI that wraps that as thinly as possible, so +that other tooling can use it to import layers from images. Those other tools +can then either manage the concept of images on their own, or let the API/CLI +handle storing the image metadata and/or configuration. Likewise, other tools +can create container layers and manage them on their own or use the API/CLI for +storing what I assume will be container metadata and/or configurations. + +Logic for importing images and creating and managing containers will most +likely be implemented elsewhere, and if that implementation ends up not needing +the API/CLI to provide a place to store data about images and containers, that +functionality can be dropped. diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/container.go b/vendor/github.com/containers/storage/cmd/containers-storage/container.go new file mode 100644 index 000000000000..d13f4dd47144 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/container.go @@ -0,0 +1,209 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +var ( + paramContainerDataFile = "" +) + +func container(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + images, err := m.Images() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + matches := []*storage.Container{} + for _, arg := range args { + if container, err := m.Container(arg); err == nil { + matches = append(matches, container) + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(matches) + } else { + for _, container := range matches { + fmt.Printf("ID: %s\n", container.ID) + for _, name := range container.Names { + fmt.Printf("Name: %s\n", name) + } + fmt.Printf("Image: %s\n", container.ImageID) + for _, image := range images { + if image.ID == container.ImageID { + for _, name := range image.Names { + fmt.Printf("Image name: %s\n", name) + } + break + } + } + fmt.Printf("Layer: %s\n", container.LayerID) + for _, name := range container.BigDataNames { + fmt.Printf("Data: %s\n", name) + } + } + } + if len(matches) != len(args) { + return 1 + } + return 0 +} + +func listContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + container, err := m.Container(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + d, err := m.ListContainerBigData(container.ID) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(d) + } else { + for _, name := range d { + fmt.Printf("%s\n", name) + } + } + return 0 +} + +func getContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + container, err := m.Container(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + output := os.Stdout + if paramContainerDataFile != "" { + f, err := os.Create(paramContainerDataFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + output = f + } + b, err := m.ContainerBigData(container.ID, args[1]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + output.Write(b) + output.Close() + return 0 +} + +func setContainerBigData(flags *mflag.FlagSet,
action string, m storage.Store, args []string) int { + container, err := m.Container(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + input := os.Stdin + if paramContainerDataFile != "" { + f, err := os.Open(paramContainerDataFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + input = f + } + b, err := ioutil.ReadAll(input) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + err = m.SetContainerBigData(container.ID, args[1], b) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + return 0 +} + +func getContainerDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + path, err := m.ContainerDirectory(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + fmt.Printf("%s\n", path) + return 0 +} + +func getContainerRunDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + path, err := m.ContainerRunDirectory(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + fmt.Printf("%s\n", path) + return 0 +} + +func init() { + commands = append(commands, + command{ + names: []string{"container"}, + optionsHelp: "[options [...]] containerNameOrID [...]", + usage: "Examine a container", + action: container, + minArgs: 1, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }, + command{ + names: []string{"list-container-data", "listcontainerdata"}, + optionsHelp: "[options [...]] containerNameOrID", + usage: "List data items that are attached to a container", + action: listContainerBigData, + minArgs: 1, + maxArgs: 1, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }, + command{ + names: []string{"get-container-data", "getcontainerdata"}, + optionsHelp: "[options [...]] containerNameOrID dataName", + usage: "Get data that is attached to a container", + action: getContainerBigData, + minArgs: 2, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&paramContainerDataFile, []string{"-file", "f"}, paramContainerDataFile, "Write data to file") + }, + }, + command{ + names: []string{"set-container-data", "setcontainerdata"}, + optionsHelp: "[options [...]] containerNameOrID dataName", + usage: "Set data that is attached to a container", + action: setContainerBigData, + minArgs: 2, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&paramContainerDataFile, []string{"-file", "f"}, paramContainerDataFile, "Read data from file") + }, + }, + command{ + names: []string{"get-container-dir", "getcontainerdir"}, + optionsHelp: "[options [...]] containerNameOrID", + usage: "Find the container's associated data directory", + action: getContainerDir, + minArgs: 1, + }, + command{ + names: []string{"get-container-run-dir", "getcontainerrundir"}, + optionsHelp: "[options [...]] containerNameOrID", + usage: "Find the container's associated runtime directory", + action: getContainerRunDir, + minArgs: 1, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/containers.go b/vendor/github.com/containers/storage/cmd/containers-storage/containers.go new file mode 100644 index 000000000000..a27121556c78 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/containers.go @@ -0,0 +1,45 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" +
+ "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +func containers(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + containers, err := m.Containers() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(containers) + } else { + for _, container := range containers { + fmt.Printf("%s\n", container.ID) + for _, name := range container.Names { + fmt.Printf("\tname: %s\n", name) + } + for _, name := range container.BigDataNames { + fmt.Printf("\tdata: %s\n", name) + } + } + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"containers"}, + optionsHelp: "[options [...]]", + usage: "List containers", + action: containers, + maxArgs: 0, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/create.go b/vendor/github.com/containers/storage/cmd/containers-storage/create.go new file mode 100644 index 000000000000..97bed1084186 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/create.go @@ -0,0 +1,201 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/opts" + "github.com/containers/storage/pkg/mflag" +) + +var ( + paramMountLabel = "" + paramNames = []string{} + paramID = "" + paramLayer = "" + paramMetadata = "" + paramMetadataFile = "" + paramCreateRO = false +) + +func createLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + parent := "" + if len(args) > 0 { + parent = args[0] + } + layer, err := m.CreateLayer(paramID, parent, paramNames, paramMountLabel, !paramCreateRO) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(layer) + } else { + fmt.Printf("%s", layer.ID) + for _, name := range layer.Names { + fmt.Printf("\t%s\n", name) + } + fmt.Printf("\n") + } + return 0 +} + +func importLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + parent := "" + if len(args) > 0 { + parent = args[0] + } + diffStream := io.Reader(os.Stdin) + if applyDiffFile != "" { + f, err := os.Open(applyDiffFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + diffStream = f + defer f.Close() + } + layer, _, err := m.PutLayer(paramID, parent, paramNames, paramMountLabel, !paramCreateRO, diffStream) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(layer) + } else { + fmt.Printf("%s", layer.ID) + for _, name := range layer.Names { + fmt.Printf("\t%s\n", name) + } + fmt.Printf("\n") + } + return 0 +} + +func createImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if paramMetadataFile != "" { + f, err := os.Open(paramMetadataFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + b, err := ioutil.ReadAll(f) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + paramMetadata = string(b) + } + image, err := m.CreateImage(paramID, paramNames, args[0], paramMetadata, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(image) + } else { + fmt.Printf("%s", image.ID) + for _, name 
:= range image.Names { + fmt.Printf("\t%s\n", name) + } + fmt.Printf("\n") + } + return 0 +} + +func createContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if paramMetadataFile != "" { + f, err := os.Open(paramMetadataFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + b, err := ioutil.ReadAll(f) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + paramMetadata = string(b) + } + container, err := m.CreateContainer(paramID, paramNames, args[0], paramLayer, paramMetadata, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(container) + } else { + fmt.Printf("%s", container.ID) + for _, name := range container.Names { + fmt.Printf("\t%s", name) + } + fmt.Printf("\n") + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"create-layer", "createlayer"}, + optionsHelp: "[options [...]] [parentLayerNameOrID]", + usage: "Create a new layer", + maxArgs: 1, + action: createLayer, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&paramMountLabel, []string{"-label", "l"}, "", "Mount Label") + flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "Layer name") + flags.StringVar(&paramID, []string{"-id", "i"}, "", "Layer ID") + flags.BoolVar(&paramCreateRO, []string{"-readonly", "r"}, false, "Mark as read-only") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"import-layer", "importlayer"}, + optionsHelp: "[options [...]] [parentLayerNameOrID]", + usage: "Import a new layer", + maxArgs: 1, + action: importLayer, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&paramMountLabel, []string{"-label", "l"}, "", "Mount Label") + flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "Layer name") + flags.StringVar(&paramID, []string{"-id", "i"}, "", "Layer ID") + flags.BoolVar(&paramCreateRO, []string{"-readonly", "r"}, false, "Mark as read-only") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + flags.StringVar(&applyDiffFile, []string{"-file", "f"}, "", "Read from file instead of stdin") + }, + }) + commands = append(commands, command{ + names: []string{"create-image", "createimage"}, + optionsHelp: "[options [...]] topLayerNameOrID", + usage: "Create a new image using layers", + minArgs: 1, + maxArgs: 1, + action: createImage, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "Image name") + flags.StringVar(&paramID, []string{"-id", "i"}, "", "Image ID") + flags.StringVar(&paramMetadata, []string{"-metadata", "m"}, "", "Metadata") + flags.StringVar(&paramMetadataFile, []string{"-metadata-file", "f"}, "", "Metadata File") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"create-container", "createcontainer"}, + optionsHelp: "[options [...]] parentImageNameOrID", + usage: "Create a new container from an image", + minArgs: 1, + maxArgs: 1, + action: createContainer, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "Container name") + flags.StringVar(&paramID, []string{"-id", "i"}, "", "Container ID") + flags.StringVar(&paramMetadata, []string{"-metadata", "m"}, "", "Metadata") +
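// -f loads the metadata value from a file, overriding anything given inline with -m. +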
flags.StringVar(&paramMetadataFile, []string{"-metadata-file", "f"}, "", "Metadata File") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/delete.go b/vendor/github.com/containers/storage/cmd/containers-storage/delete.go new file mode 100644 index 000000000000..fe42af7e3e93 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/delete.go @@ -0,0 +1,188 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +var testDeleteImage = false + +func deleteThing(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + deleted := make(map[string]string) + for _, what := range args { + err := m.Delete(what) + if err != nil { + deleted[what] = fmt.Sprintf("%v", err) + } else { + deleted[what] = "" + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(deleted) + } else { + for what, err := range deleted { + if err != "" { + fmt.Fprintf(os.Stderr, "%s: %s\n", what, err) + } + } + } + for _, err := range deleted { + if err != "" { + return 1 + } + } + return 0 +} + +func deleteLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + deleted := make(map[string]string) + for _, what := range args { + err := m.DeleteLayer(what) + if err != nil { + deleted[what] = fmt.Sprintf("%v", err) + } else { + deleted[what] = "" + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(deleted) + } else { + for what, err := range deleted { + if err != "" { + fmt.Fprintf(os.Stderr, "%s: %s\n", what, err) + } + } + } + for _, err := range deleted { + if err != "" { + return 1 + } + } + return 0 +} + +type deletedImage struct { + DeletedLayers []string `json:"deleted-layers,omitempty"` + Error string `json:"error,omitempty"` +} + +func deleteImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + deleted := make(map[string]deletedImage) + for _, what := range args { + layers, err := m.DeleteImage(what, !testDeleteImage) + errText := "" + if err != nil { + errText = fmt.Sprintf("%v", err) + } + deleted[what] = deletedImage{ + DeletedLayers: layers, + Error: errText, + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(deleted) + } else { + for what, record := range deleted { + if record.Error != "" { + fmt.Fprintf(os.Stderr, "%s: %s\n", what, record.Error) + } else { + for _, layer := range record.DeletedLayers { + fmt.Fprintf(os.Stderr, "%s: %s\n", what, layer) + } + } + } + } + for _, record := range deleted { + if record.Error != "" { + return 1 + } + } + return 0 +} + +func deleteContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + deleted := make(map[string]string) + for _, what := range args { + err := m.DeleteContainer(what) + if err != nil { + deleted[what] = fmt.Sprintf("%v", err) + } else { + deleted[what] = "" + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(deleted) + } else { + for what, err := range deleted { + if err != "" { + fmt.Fprintf(os.Stderr, "%s: %s\n", what, err) + } + } + } + for _, err := range deleted { + if err != "" { + return 1 + } + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"delete"}, + optionsHelp:
"[LayerOrImageOrContainerNameOrID [...]]", + usage: "Delete a layer or image or container, with no safety checks", + minArgs: 1, + action: deleteThing, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"delete-layer", "deletelayer"}, + optionsHelp: "[LayerNameOrID [...]]", + usage: "Delete a layer, with safety checks", + minArgs: 1, + action: deleteLayer, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"delete-image", "deleteimage"}, + optionsHelp: "[ImageNameOrID [...]]", + usage: "Delete an image, with safety checks", + minArgs: 1, + action: deleteImage, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&testDeleteImage, []string{"-test", "t"}, jsonOutput, "Only test removal") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"delete-container", "deletecontainer"}, + optionsHelp: "[ContainerNameOrID [...]]", + usage: "Delete a container, with safety checks", + minArgs: 1, + action: deleteContainer, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/diff.go b/vendor/github.com/containers/storage/cmd/containers-storage/diff.go new file mode 100644 index 000000000000..7774657711ab --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/diff.go @@ -0,0 +1,191 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/mflag" +) + +var ( + applyDiffFile = "" + diffFile = "" + diffUncompressed = false + diffGzip = false + diffBzip2 = false + diffXz = false +) + +func changes(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + to := args[0] + from := "" + if len(args) >= 2 { + from = args[1] + } + changes, err := m.Changes(from, to) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(changes) + } else { + for _, change := range changes { + what := "?" 
+ switch change.Kind { + case archive.ChangeAdd: + what = "Add" + case archive.ChangeModify: + what = "Modify" + case archive.ChangeDelete: + what = "Delete" + } + fmt.Printf("%s %q\n", what, change.Path) + } + } + return 0 +} + +func diff(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + to := args[0] + from := "" + if len(args) >= 2 { + from = args[1] + } + diffStream := io.Writer(os.Stdout) + if diffFile != "" { + f, err := os.Create(diffFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + diffStream = f + defer f.Close() + } + + options := storage.DiffOptions{} + if diffUncompressed || diffGzip || diffBzip2 || diffXz { + c := archive.Uncompressed + if diffGzip { + c = archive.Gzip + } + if diffBzip2 { + c = archive.Bzip2 + } + if diffXz { + c = archive.Xz + } + options.Compression = &c + } + + reader, err := m.Diff(from, to, &options) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + _, err = io.Copy(diffStream, reader) + reader.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + return 0 +} + +func applyDiff(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + diffStream := io.Reader(os.Stdin) + if applyDiffFile != "" { + f, err := os.Open(applyDiffFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + diffStream = f + defer f.Close() + } + _, err := m.ApplyDiff(args[0], diffStream) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + return 0 +} + +func diffSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + to := args[0] + from := "" + if len(args) >= 2 { + from = args[1] + } + n, err := m.DiffSize(from, to) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + fmt.Printf("%d\n", n) + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"changes"}, + usage: "Compare two layers", + optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]", + minArgs: 1, + maxArgs: 2, + action: changes, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"diffsize", "diff-size"}, + usage: "Compare two layers", + optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]", + minArgs: 1, + maxArgs: 2, + action: diffSize, + }) + commands = append(commands, command{ + names: []string{"diff"}, + usage: "Compare two layers", + optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]", + minArgs: 1, + maxArgs: 2, + action: diff, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&diffFile, []string{"-file", "f"}, "", "Write to file instead of stdout") + flags.BoolVar(&diffUncompressed, []string{"-uncompressed", "u"}, diffUncompressed, "Use no compression") + flags.BoolVar(&diffGzip, []string{"-gzip", "c"}, diffGzip, "Compress using gzip") + flags.BoolVar(&diffBzip2, []string{"-bzip2", "-bz2", "b"}, diffBzip2, "Compress using bzip2 (not currently supported)") + flags.BoolVar(&diffXz, []string{"-xz", "x"}, diffXz, "Compress using xz (not currently supported)") + }, + }) + commands = append(commands, command{ + names: []string{"applydiff", "apply-diff"}, + optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]", + usage: "Apply a 
diff to a layer", + minArgs: 1, + maxArgs: 1, + action: applyDiff, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&applyDiffFile, []string{"-file", "f"}, "", "Read from file instead of stdin") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/exists.go b/vendor/github.com/containers/storage/cmd/containers-storage/exists.go new file mode 100644 index 000000000000..fab1aec57050 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/exists.go @@ -0,0 +1,77 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +var ( + existLayer = false + existImage = false + existContainer = false + existQuiet = false +) + +func exist(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + anyMissing := false + existDict := make(map[string]bool) + for _, what := range args { + exists := m.Exists(what) + existDict[what] = exists + if existContainer { + if c, err := m.Container(what); c == nil || err != nil { + exists = false + } + } + if existImage { + if i, err := m.Image(what); i == nil || err != nil { + exists = false + } + } + if existLayer { + if l, err := m.Layer(what); l == nil || err != nil { + exists = false + } + } + if !exists { + anyMissing = true + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(existDict) + } else { + if !existQuiet { + for what, exists := range existDict { + fmt.Printf("%s: %v\n", what, exists) + } + } + } + if anyMissing { + return 1 + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"exists"}, + optionsHelp: "[LayerOrImageOrContainerNameOrID [...]]", + usage: "Check if a layer or image or container exists", + minArgs: 1, + action: exist, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&existQuiet, []string{"-quiet", "q"}, existQuiet, "Don't print names") + flags.BoolVar(&existLayer, []string{"-layer", "l"}, existQuiet, "Only succeed if the match is a layer") + flags.BoolVar(&existImage, []string{"-image", "i"}, existQuiet, "Only succeed if the match is an image") + flags.BoolVar(&existContainer, []string{"-container", "c"}, existQuiet, "Only succeed if the match is a container") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/image.go b/vendor/github.com/containers/storage/cmd/containers-storage/image.go new file mode 100644 index 000000000000..79c45587ea26 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/image.go @@ -0,0 +1,157 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +var ( + paramImageDataFile = "" +) + +func image(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + matched := []*storage.Image{} + for _, arg := range args { + if image, err := m.Image(arg); err == nil { + matched = append(matched, image) + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(matched) + } else { + for _, image := range matched { + fmt.Printf("ID: %s\n", image.ID) + for _, name := range image.Names { + fmt.Printf("Name: %s\n", name) + } + fmt.Printf("Top Layer: %s\n", image.TopLayer) + for _, name := range image.BigDataNames { + fmt.Printf("Data: %s\n", name) + } + } + } + if 
len(matched) != len(args) { + return 1 + } + return 0 +} + +func listImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + image, err := m.Image(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + d, err := m.ListImageBigData(image.ID) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(d) + } else { + for _, name := range d { + fmt.Printf("%s\n", name) + } + } + return 0 +} + +func getImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + image, err := m.Image(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + output := os.Stdout + if paramImageDataFile != "" { + f, err := os.Create(paramImageDataFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + output = f + } + b, err := m.ImageBigData(image.ID, args[1]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + output.Write(b) + output.Close() + return 0 +} + +func setImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + image, err := m.Image(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + input := os.Stdin + if paramImageDataFile != "" { + f, err := os.Open(paramImageDataFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + input = f + } + b, err := ioutil.ReadAll(input) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + err = m.SetImageBigData(image.ID, args[1], b) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + return 0 +} + +func init() { + commands = append(commands, + command{ + names: []string{"image"}, + optionsHelp: "[options [...]] imageNameOrID [...]", + usage: "Examine an image", + action: image, + minArgs: 1, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }, + command{ + names: []string{"list-image-data", "listimagedata"}, + optionsHelp: "[options [...]] imageNameOrID", + usage: "List data items that are attached to an image", + action: listImageBigData, + minArgs: 1, + maxArgs: 1, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }, + command{ + names: []string{"get-image-data", "getimagedata"}, + optionsHelp: "[options [...]] imageNameOrID dataName", + usage: "Get data that is attached to an image", + action: getImageBigData, + minArgs: 2, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&paramImageDataFile, []string{"-file", "f"}, paramImageDataFile, "Write data to file") + }, + }, + command{ + names: []string{"set-image-data", "setimagedata"}, + optionsHelp: "[options [...]] imageNameOrID dataName", + usage: "Set data that is attached to an image", + action: setImageBigData, + minArgs: 2, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&paramImageDataFile, []string{"-file", "f"}, paramImageDataFile, "Read data from file") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/images.go b/vendor/github.com/containers/storage/cmd/containers-storage/images.go new file mode 100644 index 000000000000..0d895ac8894c --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/images.go @@ -0,0 +1,45 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" +
"github.com/containers/storage/pkg/mflag" +) + +func images(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + images, err := m.Images() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(images) + } else { + for _, image := range images { + fmt.Printf("%s\n", image.ID) + for _, name := range image.Names { + fmt.Printf("\tname: %s\n", name) + } + for _, name := range image.BigDataNames { + fmt.Printf("\tdata: %s\n", name) + } + } + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"images"}, + optionsHelp: "[options [...]]", + usage: "List images", + action: images, + maxArgs: 0, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/layers.go b/vendor/github.com/containers/storage/cmd/containers-storage/layers.go new file mode 100644 index 000000000000..26b6041fa319 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/layers.go @@ -0,0 +1,113 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +var listLayersTree = false + +func layers(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + layers, err := m.Layers() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(layers) + return 0 + } + imageMap := make(map[string]*[]storage.Image) + if images, err := m.Images(); err == nil { + for _, image := range images { + if ilist, ok := imageMap[image.TopLayer]; ok && ilist != nil { + list := append(*ilist, image) + imageMap[image.TopLayer] = &list + } else { + list := []storage.Image{image} + imageMap[image.TopLayer] = &list + } + } + } + containerMap := make(map[string]storage.Container) + if containers, err := m.Containers(); err == nil { + for _, container := range containers { + containerMap[container.LayerID] = container + } + } + nodes := []treeNode{} + for _, layer := range layers { + if listLayersTree { + node := treeNode{ + left: string(layer.Parent), + right: string(layer.ID), + notes: []string{}, + } + if node.left == "" { + node.left = "(base)" + } + for _, name := range layer.Names { + node.notes = append(node.notes, "name: "+name) + } + if layer.MountPoint != "" { + node.notes = append(node.notes, "mount: "+layer.MountPoint) + } + if imageList, ok := imageMap[layer.ID]; ok && imageList != nil { + for _, image := range *imageList { + node.notes = append(node.notes, fmt.Sprintf("image: %s", image.ID)) + for _, name := range image.Names { + node.notes = append(node.notes, fmt.Sprintf("image name: %s", name)) + } + } + } + if container, ok := containerMap[layer.ID]; ok { + node.notes = append(node.notes, fmt.Sprintf("container: %s", container.ID)) + for _, name := range container.Names { + node.notes = append(node.notes, fmt.Sprintf("container name: %s", name)) + } + } + nodes = append(nodes, node) + } else { + fmt.Printf("%s\n", layer.ID) + for _, name := range layer.Names { + fmt.Printf("\tname: %s\n", name) + } + if imageList, ok := imageMap[layer.ID]; ok && imageList != nil { + for _, image := range *imageList { + fmt.Printf("\timage: %s\n", image.ID) + for _, name := range image.Names { + fmt.Printf("\t\tname: %s\n", name) + } + } + } + if container, ok := 
containerMap[layer.ID]; ok { + fmt.Printf("\tcontainer: %s\n", container.ID) + for _, name := range container.Names { + fmt.Printf("\t\tname: %s\n", name) + } + } + } + } + if listLayersTree { + printTree(nodes) + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"layers"}, + optionsHelp: "[options [...]]", + usage: "List layers", + action: layers, + maxArgs: 0, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&listLayersTree, []string{"-tree", "t"}, listLayersTree, "Use a tree") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/main.go b/vendor/github.com/containers/storage/cmd/containers-storage/main.go new file mode 100644 index 000000000000..17c04aa6136d --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/main.go @@ -0,0 +1,126 @@ +package main + +import ( + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/opts" + "github.com/containers/storage/pkg/mflag" + "github.com/containers/storage/pkg/reexec" + "github.com/sirupsen/logrus" +) + +type command struct { + names []string + optionsHelp string + minArgs int + maxArgs int + usage string + addFlags func(*mflag.FlagSet, *command) + action func(*mflag.FlagSet, string, storage.Store, []string) int +} + +var ( + commands = []command{} + jsonOutput = false +) + +func main() { + if reexec.Init() { + return + } + + options := storage.DefaultStoreOptions + debug := false + + makeFlags := func(command string, eh mflag.ErrorHandling) *mflag.FlagSet { + flags := mflag.NewFlagSet(command, eh) + flags.StringVar(&options.RunRoot, []string{"-run", "R"}, options.RunRoot, "Root of the runtime state tree") + flags.StringVar(&options.GraphRoot, []string{"-graph", "g"}, options.GraphRoot, "Root of the storage tree") + flags.StringVar(&options.GraphDriverName, []string{"-storage-driver", "s"}, options.GraphDriverName, "Storage driver to use ($STORAGE_DRIVER)") + flags.Var(opts.NewListOptsRef(&options.GraphDriverOptions, nil), []string{"-storage-opt"}, "Set storage driver options ($STORAGE_OPTS)") + flags.BoolVar(&debug, []string{"-debug", "D"}, debug, "Print debugging information") + return flags + } + + flags := makeFlags("containers-storage", mflag.ContinueOnError) + flags.Usage = func() { + fmt.Printf("Usage: containers-storage command [options [...]]\n\n") + fmt.Printf("Commands:\n\n") + for _, command := range commands { + fmt.Printf(" %-22s%s\n", command.names[0], command.usage) + } + fmt.Printf("\nOptions:\n") + flags.PrintDefaults() + } + + if len(os.Args) < 2 { + flags.Usage() + os.Exit(1) + } + if err := flags.ParseFlags(os.Args[1:], true); err != nil { + fmt.Printf("%v while parsing arguments (1)\n", err) + flags.Usage() + os.Exit(1) + } + + args := flags.Args() + if len(args) < 1 { + flags.Usage() + os.Exit(1) + return + } + cmd := args[0] + + for _, command := range commands { + for _, name := range command.names { + if cmd == name { + flags := makeFlags(cmd, mflag.ExitOnError) + if command.addFlags != nil { + command.addFlags(flags, &command) + } + flags.Usage = func() { + fmt.Printf("Usage: containers-storage %s %s\n\n", cmd, command.optionsHelp) + fmt.Printf("%s\n", command.usage) + fmt.Printf("\nOptions:\n") + flags.PrintDefaults() + } + if err := flags.ParseFlags(args[1:], false); err != nil { + fmt.Printf("%v while parsing arguments (3)", err) + flags.Usage() + os.Exit(1) + } + args = flags.Args() 
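+ // Check the leftover positional arguments against the command's declared minArgs/maxArgs bounds.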
+ if command.minArgs != 0 && len(args) < command.minArgs { + fmt.Printf("%s: more arguments required.\n", cmd) + flags.Usage() + os.Exit(1) + } + if command.maxArgs != 0 && len(args) > command.maxArgs { + fmt.Printf("%s: too many arguments (%s).\n", cmd, args) + flags.Usage() + os.Exit(1) + } + if debug { + logrus.SetLevel(logrus.DebugLevel) + logrus.Debugf("RunRoot: %s", options.RunRoot) + logrus.Debugf("GraphRoot: %s", options.GraphRoot) + logrus.Debugf("GraphDriverName: %s", options.GraphDriverName) + logrus.Debugf("GraphDriverOptions: %s", options.GraphDriverOptions) + } else { + logrus.SetLevel(logrus.ErrorLevel) + } + store, err := storage.GetStore(options) + if err != nil { + fmt.Printf("error initializing: %v\n", err) + os.Exit(1) + } + os.Exit(command.action(flags, cmd, store, args)) + break + } + } + } + fmt.Printf("%s: unrecognized command.\n", cmd) + os.Exit(1) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/metadata.go b/vendor/github.com/containers/storage/cmd/containers-storage/metadata.go new file mode 100644 index 000000000000..655d1df3a61f --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/metadata.go @@ -0,0 +1,98 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +var metadataQuiet = false + +func metadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + metadataDict := make(map[string]string) + missingAny := false + for _, what := range args { + if metadata, err := m.Metadata(what); err == nil { + metadataDict[what] = strings.TrimSuffix(metadata, "\n") + } else { + missingAny = true + } + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(metadataDict) + } else { + for _, what := range args { + if metadataQuiet { + fmt.Printf("%s\n", metadataDict[what]) + } else { + fmt.Printf("%s: %s\n", what, metadataDict[what]) + } + } + } + if missingAny { + return 1 + } + return 0 +} + +func setMetadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + if paramMetadataFile == "" && paramMetadata == "" { + fmt.Fprintf(os.Stderr, "no new metadata provided\n") + return 1 + } + if paramMetadataFile != "" { + f, err := os.Open(paramMetadataFile) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + b, err := ioutil.ReadAll(f) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + paramMetadata = string(b) + } + if err := m.SetMetadata(args[0], paramMetadata); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"metadata"}, + optionsHelp: "[LayerOrImageOrContainerNameOrID [...]]", + usage: "Retrieve layer, image, or container metadata", + minArgs: 1, + action: metadata, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&metadataQuiet, []string{"-quiet", "q"}, metadataQuiet, "Omit names and IDs") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"set-metadata", "setmetadata"}, + optionsHelp: "[options [...]] layerOrImageOrContainerNameOrID", + usage: "Set layer, image, or container metadata", + minArgs: 1, + maxArgs: 1, + action: setMetadata, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + 
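// The new metadata comes either inline via -m or from a file via -f; setMetadata rejects an empty pair. +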
flags.StringVar(&paramMetadata, []string{"-metadata", "m"}, "", "Metadata") + flags.StringVar(&paramMetadataFile, []string{"-metadata-file", "f"}, "", "Metadata File") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/mount.go b/vendor/github.com/containers/storage/cmd/containers-storage/mount.go new file mode 100644 index 000000000000..70075b2f7adb --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/mount.go @@ -0,0 +1,99 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +type mountPointOrError struct { + ID string `json:"id"` + MountPoint string `json:"mountpoint"` + Error string `json:"error"` +} +type mountPointError struct { + ID string `json:"id"` + Error string `json:"error"` +} + +func mount(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + moes := []mountPointOrError{} + for _, arg := range args { + result, err := m.Mount(arg, paramMountLabel) + errText := "" + if err != nil { + errText = fmt.Sprintf("%v", err) + } + moes = append(moes, mountPointOrError{arg, result, errText}) + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(moes) + } else { + for _, mountOrError := range moes { + if mountOrError.Error != "" { + fmt.Fprintf(os.Stderr, "%s while mounting %s\n", mountOrError.Error, mountOrError.ID) + } + fmt.Printf("%s\n", mountOrError.MountPoint) + } + } + for _, mountOrErr := range moes { + if mountOrErr.Error != "" { + return 1 + } + } + return 0 +} + +func unmount(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + mes := []mountPointError{} + errors := false + for _, arg := range args { + err := m.Unmount(arg) + errText := "" + if err != nil { + errText = fmt.Sprintf("%v", err) + errors = true + } + mes = append(mes, mountPointError{arg, errText}) + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(mes) + } else { + for _, me := range mes { + if me.Error != "" { + fmt.Fprintf(os.Stderr, "%s while unmounting %s\n", me.Error, me.ID) + } + } + } + if errors { + return 1 + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"mount"}, + optionsHelp: "[options [...]] LayerOrContainerNameOrID", + usage: "Mount a layer or container", + minArgs: 1, + action: mount, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.StringVar(&paramMountLabel, []string{"-label", "l"}, "", "Mount Label") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"unmount", "umount"}, + optionsHelp: "LayerOrContainerNameOrID", + usage: "Unmount a layer or container", + minArgs: 1, + action: unmount, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/name.go b/vendor/github.com/containers/storage/cmd/containers-storage/name.go new file mode 100644 index 000000000000..3c35e6a8bd1c --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/name.go @@ -0,0 +1,96 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/opts" + "github.com/containers/storage/pkg/mflag" +) + +func addNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return
1 + } + id, err := m.Lookup(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + oldnames, err := m.Names(id) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + newNames := []string{} + if oldnames != nil { + newNames = append(newNames, oldnames...) + } + if paramNames != nil { + newNames = append(newNames, paramNames...) + } + if err := m.SetNames(id, newNames); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + names, err := m.Names(id) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(names) + } + return 0 +} + +func setNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + if len(args) < 1 { + return 1 + } + id, err := m.Lookup(args[0]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if err := m.SetNames(id, paramNames); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + names, err := m.Names(id) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(names) + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"add-names", "addnames"}, + optionsHelp: "[options [...]] imageOrContainerNameOrID", + usage: "Add layer, image, or container name or names", + minArgs: 1, + action: addNames, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "New name") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) + commands = append(commands, command{ + names: []string{"set-names", "setnames"}, + optionsHelp: "[options [...]] imageOrContainerNameOrID", + usage: "Set layer, image, or container name or names", + minArgs: 1, + action: setNames, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "New name") + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/shutdown.go b/vendor/github.com/containers/storage/cmd/containers-storage/shutdown.go new file mode 100644 index 000000000000..595ca634e4d8 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/shutdown.go @@ -0,0 +1,46 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +var ( + forceShutdown = false +) + +func shutdown(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + _, err := m.Shutdown(forceShutdown) + if jsonOutput { + if err == nil { + json.NewEncoder(os.Stdout).Encode(string("")) + } else { + json.NewEncoder(os.Stdout).Encode(err) + } + } else { + if err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", action, err) + } + } + if err != nil { + return 1 + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"shutdown"}, + usage: "Shut down layer storage", + minArgs: 0, + action: shutdown, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + flags.BoolVar(&forceShutdown, []string{"-force", "f"}, forceShutdown, "Unmount mounted layers first") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/status.go
b/vendor/github.com/containers/storage/cmd/containers-storage/status.go new file mode 100644 index 000000000000..9b7669320c3a --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/status.go @@ -0,0 +1,38 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +func status(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + status, err := m.Status() + if err != nil { + fmt.Fprintf(os.Stderr, "status: %v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(status) + } else { + for _, pair := range status { + fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1]) + } + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"status"}, + usage: "Check on graph driver status", + minArgs: 0, + action: status, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/tree.go b/vendor/github.com/containers/storage/cmd/containers-storage/tree.go new file mode 100644 index 000000000000..3ff7572781c1 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/tree.go @@ -0,0 +1,88 @@ +package main + +import ( + "fmt" + "strings" +) + +const treeIndentStep = 2 +const treeStemWidth = treeIndentStep - 1 +const treeVertical = '\u2502' +const treeThisAndMore = "\u251c" +const treeJustThis = "\u2514" +const treeStem = "\u2500" + +type treeNode struct { + left, right string + notes []string +} + +func selectRoot(nodes []treeNode) string { + children := make(map[string][]string) + areChildren := make(map[string]bool) + for _, node := range nodes { + areChildren[node.right] = true + if childlist, ok := children[node.left]; ok { + children[node.left] = append(childlist, node.right) + } else { + children[node.left] = []string{node.right} + } + } + favorite := "" + for left, right := range children { + if areChildren[left] { + continue + } + if favorite == "" { + favorite = left + } else if len(right) < len(children[favorite]) { + favorite = left + } + } + return favorite +} + +func printSubTree(root string, nodes []treeNode, indent int, continued []int) []treeNode { + leftovers := []treeNode{} + children := []treeNode{} + for _, node := range nodes { + if node.left != root { + leftovers = append(leftovers, node) + continue + } + children = append(children, node) + } + for n, child := range children { + istring := []rune(strings.Repeat(" ", indent)) + for _, column := range continued { + istring[column] = treeVertical + } + subc := continued[:] + header := treeJustThis + noteHeader := " " + if n < len(children)-1 { + subc = append(subc, indent) + header = treeThisAndMore + noteHeader = string(treeVertical) + } + fmt.Printf("%s%s%s%s\n", string(istring), header, strings.Repeat(treeStem, treeStemWidth), child.right) + for _, note := range child.notes { + fmt.Printf("%s%s%s%s\n", string(istring), noteHeader, strings.Repeat(" ", treeStemWidth), note) + } + leftovers = printSubTree(child.right, leftovers, indent+treeIndentStep, subc) + } + return leftovers +} + +func printTree(nodes []treeNode) { + for len(nodes) > 0 { + root := selectRoot(nodes) + fmt.Printf("%s\n", root) + oldLength := len(nodes) + nodes = printSubTree(root, nodes, 0, []int{}) + newLength := len(nodes) + if oldLength == newLength { + break + } + } +} diff --git 
a/vendor/github.com/containers/storage/cmd/containers-storage/tree_test.go b/vendor/github.com/containers/storage/cmd/containers-storage/tree_test.go new file mode 100644 index 000000000000..c9f7437e916f --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/tree_test.go @@ -0,0 +1,25 @@ +package main + +import "testing" + +func TestTree(*testing.T) { + nodes := []treeNode{ + {"F", "H", []string{}}, + {"F", "I", []string{}}, + {"F", "J", []string{}}, + {"A", "B", []string{}}, + {"A", "C", []string{}}, + {"A", "K", []string{}}, + {"C", "F", []string{}}, + {"C", "G", []string{"beware", "the", "scary", "thing"}}, + {"C", "L", []string{}}, + {"B", "D", []string{}}, + {"B", "E", []string{}}, + {"B", "M", []string{}}, + {"K", "N", []string{}}, + {"W", "X", []string{}}, + {"Y", "Z", []string{}}, + {"X", "Y", []string{}}, + } + printTree(nodes) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/version.go b/vendor/github.com/containers/storage/cmd/containers-storage/version.go new file mode 100644 index 000000000000..db910c6986d0 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/version.go @@ -0,0 +1,38 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +func version(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + version, err := m.Version() + if err != nil { + fmt.Fprintf(os.Stderr, "version: %v\n", err) + return 1 + } + if jsonOutput { + json.NewEncoder(os.Stdout).Encode(version) + } else { + for _, pair := range version { + fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1]) + } + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"version"}, + usage: "Return containers-storage version information", + minArgs: 0, + action: version, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/cmd/containers-storage/wipe.go b/vendor/github.com/containers/storage/cmd/containers-storage/wipe.go new file mode 100644 index 000000000000..8a0c709fb5a4 --- /dev/null +++ b/vendor/github.com/containers/storage/cmd/containers-storage/wipe.go @@ -0,0 +1,41 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/containers/storage" + "github.com/containers/storage/pkg/mflag" +) + +func wipe(flags *mflag.FlagSet, action string, m storage.Store, args []string) int { + err := m.Wipe() + if jsonOutput { + if err == nil { + json.NewEncoder(os.Stdout).Encode(string("")) + } else { + json.NewEncoder(os.Stdout).Encode(err) + } + } else { + if err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", action, err) + } + } + if err != nil { + return 1 + } + return 0 +} + +func init() { + commands = append(commands, command{ + names: []string{"wipe"}, + usage: "Wipe all layers, images, and containers", + minArgs: 0, + action: wipe, + addFlags: func(flags *mflag.FlagSet, cmd *command) { + flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output") + }, + }) +} diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 5631e31c3ea7..0908bdd12586 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -2,6 +2,7 @@ package storage import ( "encoding/json" + "errors" 
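+ // the stdlib errors package replaces the github.com/pkg/errors wrapping that this change removes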
"io/ioutil" "os" "path/filepath" @@ -10,8 +11,11 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID + ErrContainerUnknown = errors.New("container not known") ) // A Container is a reference to a read-write layer with metadata. @@ -46,10 +50,6 @@ type Container struct { // that has been stored, if they're known. BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` - // BigDataDigests maps the names in BigDataNames to the digests of the - // data that has been stored, if they're known. - BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` - // Created is the datestamp for when this container was created. Older // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value @@ -139,7 +139,6 @@ func (r *containerStore) Load() error { ids := make(map[string]*Container) names := make(map[string]*Container) if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(containers)) for n, container := range containers { idlist = append(idlist, container.ID) ids[container.ID] = containers[n] @@ -230,9 +229,6 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro if !ok { return ErrContainerUnknown } - if container.Flags == nil { - container.Flags = make(map[string]interface{}) - } container.Flags[flag] = value return r.Save() } @@ -249,7 +245,6 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat if _, idInUse := r.byid[id]; idInUse { return nil, ErrDuplicateID } - names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { return nil, ErrDuplicateName @@ -257,16 +252,15 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat } if err == nil { container = &Container{ - ID: id, - Names: names, - ImageID: image, - LayerID: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + Created: time.Now().UTC(), + Flags: make(map[string]interface{}), } r.containers = append(r.containers, container) r.byid[id] = container @@ -300,7 +294,6 @@ func (r *containerStore) removeName(container *Container, name string) { } func (r *containerStore) SetNames(id string, names []string) error { - names = dedupeNames(names) if container, ok := r.lookup(id); ok { for _, name := range container.Names { delete(r.byname, name) @@ -373,9 +366,6 @@ func (r *containerStore) Exists(id string) bool { } func (r *containerStore) BigData(id, key string) ([]byte, error) { - if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") - } c, ok := r.lookup(id) if !ok { return nil, ErrContainerUnknown @@ -384,61 +374,16 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) { } func (r *containerStore) BigDataSize(id, key string) (int64, error) { - if key == "" { - return -1, 
errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") - } c, ok := r.lookup(id) if !ok { return -1, ErrContainerUnknown } - if c.BigDataSizes == nil { - c.BigDataSizes = make(map[string]int64) - } if size, ok := c.BigDataSizes[key]; ok { return size, nil } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - c, ok := r.lookup(id) - if !ok { - return -1, ErrContainerUnknown - } - if size, ok := c.BigDataSizes[key]; ok { - return size, nil - } - } - } return -1, ErrSizeUnknown } -func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { - if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") - } - c, ok := r.lookup(id) - if !ok { - return "", ErrContainerUnknown - } - if c.BigDataDigests == nil { - c.BigDataDigests = make(map[string]digest.Digest) - } - if d, ok := c.BigDataDigests[key]; ok { - return d, nil - } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - c, ok := r.lookup(id) - if !ok { - return "", ErrContainerUnknown - } - if d, ok := c.BigDataDigests[key]; ok { - return d, nil - } - } - } - return "", ErrDigestUnknown -} - func (r *containerStore) BigDataNames(id string) ([]string, error) { c, ok := r.lookup(id) if !ok { @@ -448,9 +393,6 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) { } func (r *containerStore) SetBigData(id, key string, data []byte) error { - if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") - } c, ok := r.lookup(id) if !ok { return ErrContainerUnknown @@ -461,28 +403,19 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) if err == nil { save := false - if c.BigDataSizes == nil { - c.BigDataSizes = make(map[string]int64) - } - oldSize, sizeOk := c.BigDataSizes[key] + oldSize, ok := c.BigDataSizes[key] c.BigDataSizes[key] = int64(len(data)) - if c.BigDataDigests == nil { - c.BigDataDigests = make(map[string]digest.Digest) - } - oldDigest, digestOk := c.BigDataDigests[key] - newDigest := digest.Canonical.FromBytes(data) - c.BigDataDigests[key] = newDigest - if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { + if !ok || oldSize != c.BigDataSizes[key] { save = true } - addName := true + add := true for _, name := range c.BigDataNames { if name == key { - addName = false + add = false break } } - if addName { + if add { c.BigDataNames = append(c.BigDataNames, key) save = true } @@ -494,7 +427,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { } func (r *containerStore) Wipe() error { - ids := make([]string, 0, len(r.byid)) + ids := []string{} for id := range r.byid { ids = append(ids, id) } diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go deleted file mode 100644 index 95261980612e..000000000000 --- a/vendor/github.com/containers/storage/containers_ffjson.go +++ /dev/null @@ -1,1194 +0,0 @@ -// Code generated by ffjson . DO NOT EDIT. 
-// source: containers.go - -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/opencontainers/go-digest" - fflib "github.com/pquerna/ffjson/fflib/v1" -) - -// MarshalJSON marshal bytes to json - template -func (j *Container) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{ "id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteByte(',') - if len(j.Names) != 0 { - buf.WriteString(`"names":`) - if j.Names != nil { - buf.WriteString(`[`) - for i, v := range j.Names { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - buf.WriteString(`"image":`) - fflib.WriteJsonString(buf, string(j.ImageID)) - buf.WriteString(`,"layer":`) - fflib.WriteJsonString(buf, string(j.LayerID)) - buf.WriteByte(',') - if len(j.Metadata) != 0 { - buf.WriteString(`"metadata":`) - fflib.WriteJsonString(buf, string(j.Metadata)) - buf.WriteByte(',') - } - if len(j.BigDataNames) != 0 { - buf.WriteString(`"big-data-names":`) - if j.BigDataNames != nil { - buf.WriteString(`[`) - for i, v := range j.BigDataNames { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.BigDataSizes) != 0 { - if j.BigDataSizes == nil { - buf.WriteString(`"big-data-sizes":null`) - } else { - buf.WriteString(`"big-data-sizes":{ `) - for key, value := range j.BigDataSizes { - fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.FormatBits2(buf, uint64(value), 10, value < 0) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if len(j.BigDataDigests) != 0 { - if j.BigDataDigests == nil { - buf.WriteString(`"big-data-digests":null`) - } else { - buf.WriteString(`"big-data-digests":{ `) - for key, value := range j.BigDataDigests { - fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.WriteJsonString(buf, string(value)) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if true { - buf.WriteString(`"created":`) - - { - - obj, err = j.Created.MarshalJSON() - if err != nil { - return err - } - buf.Write(obj) - - } - buf.WriteByte(',') - } - if len(j.Flags) != 0 { - buf.WriteString(`"flags":`) - /* Falling back. 
type=map[string]interface {} kind=map */ - err = buf.Encode(j.Flags) - if err != nil { - return err - } - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - return nil -} - -const ( - ffjtContainerbase = iota - ffjtContainernosuchkey - - ffjtContainerID - - ffjtContainerNames - - ffjtContainerImageID - - ffjtContainerLayerID - - ffjtContainerMetadata - - ffjtContainerBigDataNames - - ffjtContainerBigDataSizes - - ffjtContainerBigDataDigests - - ffjtContainerCreated - - ffjtContainerFlags -) - -var ffjKeyContainerID = []byte("id") - -var ffjKeyContainerNames = []byte("names") - -var ffjKeyContainerImageID = []byte("image") - -var ffjKeyContainerLayerID = []byte("layer") - -var ffjKeyContainerMetadata = []byte("metadata") - -var ffjKeyContainerBigDataNames = []byte("big-data-names") - -var ffjKeyContainerBigDataSizes = []byte("big-data-sizes") - -var ffjKeyContainerBigDataDigests = []byte("big-data-digests") - -var ffjKeyContainerCreated = []byte("created") - -var ffjKeyContainerFlags = []byte("flags") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *Container) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *Container) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtContainerbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtContainernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'b': - - if bytes.Equal(ffjKeyContainerBigDataNames, kn) { - currentKey = ffjtContainerBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyContainerBigDataSizes, kn) { - currentKey = ffjtContainerBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyContainerBigDataDigests, kn) { - currentKey = ffjtContainerBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'c': - - if bytes.Equal(ffjKeyContainerCreated, kn) { - currentKey = ffjtContainerCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'f': - - if bytes.Equal(ffjKeyContainerFlags, kn) { - currentKey = ffjtContainerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeyContainerID, kn) { - currentKey = ffjtContainerID - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyContainerImageID, kn) { - currentKey = ffjtContainerImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'l': - - if bytes.Equal(ffjKeyContainerLayerID, kn) { - currentKey = ffjtContainerLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'm': - - if bytes.Equal(ffjKeyContainerMetadata, kn) { - currentKey = ffjtContainerMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'n': - - if bytes.Equal(ffjKeyContainerNames, kn) { - currentKey = ffjtContainerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyContainerFlags, kn) { - currentKey = ffjtContainerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerCreated, kn) { - currentKey = ffjtContainerCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerBigDataDigests, kn) { - currentKey = ffjtContainerBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerBigDataSizes, kn) { - currentKey = ffjtContainerBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerBigDataNames, kn) { - currentKey = ffjtContainerBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerMetadata, kn) { - currentKey = ffjtContainerMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerLayerID, kn) { - currentKey = ffjtContainerLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerImageID, kn) { - currentKey = ffjtContainerImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerNames, kn) { - currentKey = ffjtContainerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerID, kn) { - currentKey = ffjtContainerID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtContainernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || 
tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtContainerID: - goto handle_ID - - case ffjtContainerNames: - goto handle_Names - - case ffjtContainerImageID: - goto handle_ImageID - - case ffjtContainerLayerID: - goto handle_LayerID - - case ffjtContainerMetadata: - goto handle_Metadata - - case ffjtContainerBigDataNames: - goto handle_BigDataNames - - case ffjtContainerBigDataSizes: - goto handle_BigDataSizes - - case ffjtContainerBigDataDigests: - goto handle_BigDataDigests - - case ffjtContainerCreated: - goto handle_Created - - case ffjtContainerFlags: - goto handle_Flags - - case ffjtContainernosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Names: - - /* handler: j.Names type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Names = nil - } else { - - j.Names = []string{} - - wantVal := true - - for { - - var tmpJNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJNames = string(string(outBuf)) - - } - } - - j.Names = append(j.Names, tmpJNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_ImageID: - - /* handler: j.ImageID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ImageID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_LayerID: - - /* handler: j.LayerID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.LayerID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Metadata: - - /* handler: j.Metadata type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Metadata = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataNames: - - /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataNames = nil - } else { - - j.BigDataNames = []string{} - - wantVal := true - - for { - - var tmpJBigDataNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataNames = string(string(outBuf)) - - } - } - - j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataSizes: - - /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataSizes = nil - } else { - - j.BigDataSizes = make(map[string]int64, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataSizes int64 - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - tmpJBigDataSizes = int64(tval) - - } - } - - j.BigDataSizes[k] = tmpJBigDataSizes - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataDigests: - - /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataDigests = nil - } else { - - j.BigDataDigests = make(map[string]digest.Digest, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataDigests digest.Digest - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataDigests = digest.Digest(string(outBuf)) - - } - } - - j.BigDataDigests[k] = tmpJBigDataDigests - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Created: - - /* handler: j.Created type=time.Time kind=struct quoted=false*/ - - { - if tok == fflib.FFTok_null { - - } else { - - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = j.Created.UnmarshalJSON(tbuf) - if err != nil { - return fs.WrapErr(err) - } - } - state = fflib.FFParse_after_value - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Flags: - - /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Flags = nil - } else { - - j.Flags = make(map[string]interface{}, 0) - - wantVal := true - - for { - - var k string - - var tmpJFlags interface{} - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ - - { - /* Falling back. 
type=interface {} kind=interface */ - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = json.Unmarshal(tbuf, &tmpJFlags) - if err != nil { - return fs.WrapErr(err) - } - } - - j.Flags[k] = tmpJFlags - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *containerStore) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *containerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtcontainerStorebase = iota - ffjtcontainerStorenosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *containerStore) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *containerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtcontainerStorebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtcontainerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtcontainerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtcontainerStorenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} diff --git a/vendor/github.com/containers/storage/docs/containers-storage-add-names.md b/vendor/github.com/containers/storage/docs/containers-storage-add-names.md index dceabb8fe98f..967881c605d0 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-add-names.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-add-names.md @@ -22,5 +22,4 @@ other layer, image, or container. **containers-storage add-names -n my-awesome-container -n my-for-realsies-awesome-container f3be6c6134d0d980936b4c894f1613b69a62b79588fdeda744d0be3693bde8ec** ## SEE ALSO -containers-storage-get-names(1) containers-storage-set-names(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-container.md b/vendor/github.com/containers/storage/docs/containers-storage-container.md index 29b2edf4696b..14915796740c 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-container.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-container.md @@ -16,5 +16,3 @@ to create it, any names that image has, and the ID of the container's layer. ## SEE ALSO containers-storage-containers(1) -containers-storage-image(1) -containers-storage-layer(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-get-container-data-digest.md b/vendor/github.com/containers/storage/docs/containers-storage-get-container-data-digest.md deleted file mode 100644 index 4a2e78306da9..000000000000 --- a/vendor/github.com/containers/storage/docs/containers-storage-get-container-data-digest.md +++ /dev/null @@ -1,20 +0,0 @@ -## containers-storage-get-container-data-digest 1 "August 2017" - -## NAME -containers-storage get-container-data-digest - Retrieve the digest of a lookaside data item - -## SYNOPSIS -**containers-storage** **get-container-data-digest** *containerNameOrID* *dataName* - -## DESCRIPTION -Prints the digest of the named data item which is associated with the specified -container. 
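[Editor's note, not part of the patch] The get-container-data-digest page removed here reported digests that the deleted BigDataDigest code earlier in this diff computed with digest.Canonical.FromBytes from the opencontainers/go-digest package. A minimal, self-contained sketch of that computation (the sample payload is invented):

    package main

    import (
    	"fmt"

    	digest "github.com/opencontainers/go-digest"
    )

    func main() {
    	data := []byte(`{"example":"manifest"}`)
    	// Canonical is SHA-256, so this prints "sha256:<hex>".
    	fmt.Println(digest.Canonical.FromBytes(data))
    }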
- -## EXAMPLE -**containers-storage get-container-data-digest my-container manifest.json** - -## SEE ALSO -containers-storage-get-container-data(1) -containers-storage-get-container-data-size(1) -containers-storage-list-container-data(1) -containers-storage-set-container-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-get-container-data-size.md b/vendor/github.com/containers/storage/docs/containers-storage-get-container-data-size.md deleted file mode 100644 index b92dec26c5de..000000000000 --- a/vendor/github.com/containers/storage/docs/containers-storage-get-container-data-size.md +++ /dev/null @@ -1,20 +0,0 @@ -## containers-storage-get-container-data-size 1 "August 2017" - -## NAME -containers-storage get-container-data-size - Retrieve the size of a lookaside data item - -## SYNOPSIS -**containers-storage** **get-container-data-size** *containerNameOrID* *dataName* - -## DESCRIPTION -Prints the size of the named data item which is associated with the specified -container. - -## EXAMPLE -**containers-storage get-container-data-size my-container blah.foo** - -## SEE ALSO -containers-storage-get-container-data(1) -containers-storage-get-container-data-digest(1) -containers-storage-list-container-data(1) -containers-storage-set-image-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-get-container-data.md b/vendor/github.com/containers/storage/docs/containers-storage-get-container-data.md index 0dbfdcca20b4..4e51eeeea62b 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-get-container-data.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-get-container-data.md @@ -19,6 +19,4 @@ Write the data to a file instead of stdout. ## SEE ALSO containers-storage-list-container-data(1) -containers-storage-get-container-data-size(1) -containers-storage-get-container-data-digest(1) containers-storage-set-container-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-get-image-data-digest.md b/vendor/github.com/containers/storage/docs/containers-storage-get-image-data-digest.md deleted file mode 100644 index 2b5c89d52769..000000000000 --- a/vendor/github.com/containers/storage/docs/containers-storage-get-image-data-digest.md +++ /dev/null @@ -1,20 +0,0 @@ -## containers-storage-get-image-data-digest 1 "August 2017" - -## NAME -containers-storage get-image-data-digest - Retrieve the digest of a lookaside data item - -## SYNOPSIS -**containers-storage** **get-image-data-digest** *imageNameOrID* *dataName* - -## DESCRIPTION -Prints the digest of the named data item which is associated with the specified -image. 
- -## EXAMPLE -**containers-storage get-image-data-digest my-image manifest.json** - -## SEE ALSO -containers-storage-get-image-data(1) -containers-storage-get-image-data-size(1) -containers-storage-list-image-data(1) -containers-storage-set-image-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-get-image-data-size.md b/vendor/github.com/containers/storage/docs/containers-storage-get-image-data-size.md deleted file mode 100644 index e946508eb6df..000000000000 --- a/vendor/github.com/containers/storage/docs/containers-storage-get-image-data-size.md +++ /dev/null @@ -1,20 +0,0 @@ -## containers-storage-get-image-data-size 1 "August 2017" - -## NAME -containers-storage get-image-data-size - Retrieve the size of a lookaside data item - -## SYNOPSIS -**containers-storage** **get-image-data-size** *imageNameOrID* *dataName* - -## DESCRIPTION -Prints the size of the named data item which is associated with the specified -image. - -## EXAMPLE -**containers-storage get-image-data-size my-image manifest.json** - -## SEE ALSO -containers-storage-get-image-data(1) -containers-storage-get-image-data-digest(1) -containers-storage-list-image-data(1) -containers-storage-set-image-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-get-image-data.md b/vendor/github.com/containers/storage/docs/containers-storage-get-image-data.md index ec853d72589d..07cb2e551e45 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-get-image-data.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-get-image-data.md @@ -19,6 +19,4 @@ Write the data to a file instead of stdout. ## SEE ALSO containers-storage-list-image-data(1) -containers-storage-get-image-data-size(1) -containers-storage-get-image-data-digest(1) containers-storage-set-image-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-get-names.md b/vendor/github.com/containers/storage/docs/containers-storage-get-names.md deleted file mode 100644 index 0532f372bbd0..000000000000 --- a/vendor/github.com/containers/storage/docs/containers-storage-get-names.md +++ /dev/null @@ -1,21 +0,0 @@ -## containers-storage-get-names 1 "September 2017" - -## NAME -containers-storage get-names - Get names of a layer/image/container - -## SYNOPSIS -**containers-storage** **get-names** *layerOrImageOrContainerNameOrID* - -## DESCRIPTION -In addition to IDs, *layers*, *images*, and *containers* can have -human-readable names assigned to them in *containers-storage*. The *get-names* -command can be used to read the list of names for any of them. - -## OPTIONS - -## EXAMPLE -**containers-storage get-names f3be6c6134d0d980936b4c894f1613b69a62b79588fdeda744d0be3693bde8ec** - -## SEE ALSO -containers-storage-add-names(1) -containers-storage-set-names(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-image.md b/vendor/github.com/containers/storage/docs/containers-storage-image.md index a72eb5312f7d..9988b0fef5da 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-image.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-image.md @@ -16,5 +16,3 @@ its top layer. 
## SEE ALSO containers-storage-images(1) -containers-storage-layer(1) -containers-storage-container(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-layer.md b/vendor/github.com/containers/storage/docs/containers-storage-layer.md deleted file mode 100644 index 77064d602a07..000000000000 --- a/vendor/github.com/containers/storage/docs/containers-storage-layer.md +++ /dev/null @@ -1,19 +0,0 @@ -## containers-storage-layer 1 "September 2017" - -## NAME -containers-storage layer - Examine a single layer - -## SYNOPSIS -**containers-storage** **layer** *layerNameOrID* - -## DESCRIPTION -Retrieve information about an layer: its ID, any names it has, and the ID of -its parent, if it has one. - -## EXAMPLE -**containers-storage layer 49bff34e4baf9378c01733d02276a731a4c4771ebeab305020c5303679f88bb8** -**containers-storage layer my-favorite-layer** - -## SEE ALSO -containers-storage-image(1) -containers-storage-container(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-list-container-data.md b/vendor/github.com/containers/storage/docs/containers-storage-list-container-data.md index 0c7ccbc95726..f46332e4ca05 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-list-container-data.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-list-container-data.md @@ -14,6 +14,4 @@ List the pieces of named data which are associated with a container. ## SEE ALSO containers-storage-get-container-data(1) -containers-storage-get-container-data-size(1) -containers-storage-get-container-data-digest(1) containers-storage-set-container-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-list-image-data.md b/vendor/github.com/containers/storage/docs/containers-storage-list-image-data.md index 5dd8fc6f8ec3..b33cd1e6b7d5 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-list-image-data.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-list-image-data.md @@ -14,6 +14,4 @@ List the pieces of named data which are associated with an image. ## SEE ALSO containers-storage-get-image-data(1) -containers-storage-get-image-data-size(1) -containers-storage-get-image-data-digest(1) -containers-storage-set-image-data(1) +containers-storage-list-image-data(1) diff --git a/vendor/github.com/containers/storage/docs/containers-storage-set-container-data.md b/vendor/github.com/containers/storage/docs/containers-storage-set-container-data.md index 4ee23730e63c..9cb30289f4fa 100644 --- a/vendor/github.com/containers/storage/docs/containers-storage-set-container-data.md +++ b/vendor/github.com/containers/storage/docs/containers-storage-set-container-data.md @@ -18,7 +18,5 @@ Read the data contents from a file instead of stdin. 
**containers-storage set-container-data -f ./config.json my-container configuration**
 
 ## SEE ALSO
-containers-storage-list-container-data(1)
 containers-storage-get-container-data(1)
-containers-storage-get-container-data-size(1)
-containers-storage-get-container-data-digest(1)
+containers-storage-list-container-data(1)
diff --git a/vendor/github.com/containers/storage/docs/containers-storage-set-image-data.md b/vendor/github.com/containers/storage/docs/containers-storage-set-image-data.md
index dd0abe66383f..09187d677c10 100644
--- a/vendor/github.com/containers/storage/docs/containers-storage-set-image-data.md
+++ b/vendor/github.com/containers/storage/docs/containers-storage-set-image-data.md
@@ -18,7 +18,5 @@ Read the data contents from a file instead of stdin.
 **containers-storage set-image-data -f ./manifest.json my-image manifest**
 
 ## SEE ALSO
-containers-storage-list-image-data(1)
 containers-storage-get-image-data(1)
-containers-storage-get-image-data-size(1)
-containers-storage-get-image-data-digest(1)
+containers-storage-list-image-data(1)
diff --git a/vendor/github.com/containers/storage/docs/containers-storage-set-names.md b/vendor/github.com/containers/storage/docs/containers-storage-set-names.md
index 5768e7e84412..e4f676fd737b 100644
--- a/vendor/github.com/containers/storage/docs/containers-storage-set-names.md
+++ b/vendor/github.com/containers/storage/docs/containers-storage-set-names.md
@@ -25,4 +25,3 @@ will be removed from the layer, image, or container.
 
 ## SEE ALSO
 containers-storage-add-names(1)
-containers-storage-get-names(1)
diff --git a/vendor/github.com/containers/storage/docs/containers-storage.conf.5.md b/vendor/github.com/containers/storage/docs/storage.conf.5.md
similarity index 70%
rename from vendor/github.com/containers/storage/docs/containers-storage.conf.5.md
rename to vendor/github.com/containers/storage/docs/storage.conf.5.md
index 3086ca473e32..39a8fb75bb26 100644
--- a/vendor/github.com/containers/storage/docs/containers-storage.conf.5.md
+++ b/vendor/github.com/containers/storage/docs/storage.conf.5.md
@@ -28,6 +28,7 @@ No bare options are used. The format of TOML can be simplified to:
 
 The `storage` table supports the following options:
 
+
 **graphroot**=""
   container storage graph dir (default: "/var/lib/containers/storage")
   Default directory to store all writable content created by container storage programs
@@ -40,21 +41,8 @@ The `storage` table supports the following options:
   container storage driver (default is "overlay")
   Default Copy On Write (COW) container storage driver
 
-### STORAGE OPTIONS TABLE
-
-The `storage.options` table supports the following options:
-
 **additionalimagestores**=[]
-  Paths to additional container image stores. Usually these are read/only and stored on remote network shares.
-
-**size**=""
-  Maximum size of a container image. Default is 10GB. This flag can be used to set quota
-  on the size of container images.
-
-**override_kernel_check**=""
-  Tell storage drivers to ignore kernel version checks. Some storage drivers assume that if a kernel is too
-  old, the driver is not supported. But for kernels that have had the drivers backported, this flag
-  allows users to override the checks
+  Paths to additional container image stores. Usually these are read/only and stored on remote network shares.
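[Editor's note, not part of the patch] The renamed storage.conf.5.md above documents a TOML `storage` table with `driver`, `graphroot`, and `additionalimagestores` keys. A hypothetical reader for just those documented keys, assuming the github.com/BurntSushi/toml package (this patch does not prescribe a TOML library, and the file path shown is illustrative):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/BurntSushi/toml"
    )

    // storageConfig mirrors the keys the man page documents.
    type storageConfig struct {
    	Storage struct {
    		Driver                string   `toml:"driver"`
    		GraphRoot             string   `toml:"graphroot"`
    		AdditionalImageStores []string `toml:"additionalimagestores"`
    	} `toml:"storage"`
    }

    func main() {
    	var cfg storageConfig
    	// Path is an assumption; the page excerpt above does not fix one.
    	if _, err := toml.DecodeFile("/etc/containers/storage.conf", &cfg); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("driver=%s graphroot=%s\n", cfg.Storage.Driver, cfg.Storage.GraphRoot)
    }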
# HISTORY May 2017, Originally compiled by Dan Walsh diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index aa0da7ad0af6..2e7f1e659940 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -25,7 +25,6 @@ package aufs import ( "bufio" "fmt" - "io" "io/ioutil" "os" "os/exec" @@ -33,22 +32,22 @@ import ( "path/filepath" "strings" "sync" - "time" + "syscall" + + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" mountpk "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/stringid" + rsystem "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - "golang.org/x/sys/unix" ) var ( @@ -75,8 +74,6 @@ type Driver struct { ctr *graphdriver.RefCounter pathCacheLock sync.Mutex pathCache map[string]string - naiveDiff graphdriver.DiffDriver - locker *locker.Locker } // Init returns a new AUFS driver. @@ -86,7 +83,6 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") - } fsMagic, err := graphdriver.GetFSMagic(root) @@ -115,7 +111,6 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap gidMaps: gidMaps, pathCache: make(map[string]string), ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), - locker: locker.New(), } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) @@ -142,32 +137,6 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } } - logger := logrus.WithFields(logrus.Fields{ - "module": "graphdriver", - "driver": "aufs", - }) - - for _, path := range []string{"mnt", "diff"} { - p := filepath.Join(root, path) - entries, err := ioutil.ReadDir(p) - if err != nil { - logger.WithError(err).WithField("dir", p).Error("error reading dir entries") - continue - } - for _, entry := range entries { - if !entry.IsDir() { - continue - } - if strings.HasSuffix(entry.Name(), "-removing") { - logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") - if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { - logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") - } - } - } - } - - a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) return a, nil } @@ -231,22 +200,17 @@ func (a *Driver) Exists(id string) bool { return true } -// AdditionalImageStores returns additional image stores supported by the driver -func (a *Driver) AdditionalImageStores() []string { - return nil -} - // CreateReadWrite creates a layer that is writable for use as a container // file system. 
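// [Editor's note, not part of the patch] The hunk below reverts the
// layer-creation API from a single *graphdriver.CreateOpts argument
// back to positional mountLabel/storageOpt arguments. A caller of the
// reverted signature therefore looks like this (ids are illustrative;
// aufs rejects any non-empty storageOpt, as the check in Create shows):
//
//	if err := d.CreateReadWrite("layer-rw", "parent-id", "", nil); err != nil {
//		// handle error
//	}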
-func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return a.Create(id, parent, opts) +func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return a.Create(id, parent, mountLabel, storageOpt) } // Create three folders for each id // mnt, layers, and diff -func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { +func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - if opts != nil && len(opts.StorageOpt) != 0 { + if len(storageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for aufs") } @@ -261,7 +225,7 @@ func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { defer f.Close() if parent != "" { - ids, err := getParentIDs(a.rootPath(), parent) + ids, err := getParentIds(a.rootPath(), parent) if err != nil { return err } @@ -304,68 +268,35 @@ func (a *Driver) createDirsFor(id string) error { // Remove will unmount and remove the given id. func (a *Driver) Remove(id string) error { - a.locker.Lock(id) - defer a.locker.Unlock(id) a.pathCacheLock.Lock() mountpoint, exists := a.pathCache[id] a.pathCacheLock.Unlock() if !exists { mountpoint = a.getMountpoint(id) } - - logger := logrus.WithFields(logrus.Fields{ - "module": "graphdriver", - "driver": "aufs", - "layer": id, - }) - - var retries int - for { - mounted, err := a.mounted(mountpoint) - if err != nil { - if os.IsNotExist(err) { - break - } - return err - } - if !mounted { - break - } - - err = a.unmount(mountpoint) - if err == nil { - break - } - - if err != unix.EBUSY { - return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) - } - if retries >= 5 { - return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) - } - // If unmount returns EBUSY, it could be a transient error. Sleep and retry. - retries++ - logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) - time.Sleep(100 * time.Millisecond) + if err := a.unmount(mountpoint); err != nil { + // no need to return here, we can still try to remove since the `Rename` will fail below if still mounted + logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err) } - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "error removing layers dir for %s", id) + // Atomically remove each directory in turn by first moving it out of the + // way (so that container runtimes don't find it anymore) before doing removal of + // the whole tree. + tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { + return err } + defer os.RemoveAll(tmpMntPath) - if err := atomicRemove(a.getDiffPath(id)); err != nil { - return errors.Wrapf(err, "could not remove diff path for id %s", id) + tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { + return err } + defer os.RemoveAll(tmpDiffpath) - // Atomically remove each directory in turn by first moving it out of the - // way (so that container runtime doesn't find it anymore) before doing removal of - // the whole tree. 
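// [Editor's note, not part of the patch] Both versions of Remove in
// this hunk share one idea: rename the directory to "<path>-removing"
// so lookups stop finding it, then delete the renamed tree. Distilled
// into a standalone helper (tolerating an already-gone source with
// os.IsNotExist, exactly as the surrounding code does):
//
//	func removeAside(dir string) error {
//		tmp := dir + "-removing"
//		if err := os.Rename(dir, tmp); err != nil && !os.IsNotExist(err) {
//			return err
//		}
//		return os.RemoveAll(tmp)
//	}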
- if err := atomicRemove(mountpoint); err != nil { - if errors.Cause(err) == unix.EBUSY { - logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") - } - return errors.Wrapf(err, "could not remove mountpoint for id %s", id) + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err } a.pathCacheLock.Lock() @@ -374,29 +305,9 @@ func (a *Driver) Remove(id string) error { return nil } -func atomicRemove(source string) error { - target := source + "-removing" - - err := os.Rename(source, target) - switch { - case err == nil, os.IsNotExist(err): - case os.IsExist(err): - // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove - if _, e := os.Stat(source); !os.IsNotExist(e) { - return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up") - } - default: - return errors.Wrapf(err, "error preparing atomic delete") - } - - return system.EnsureRemoveAll(target) -} - // Get returns the rootfs path for the id. -// This will mount the dir at its given path +// This will mount the dir at it's given path func (a *Driver) Get(id, mountLabel string) (string, error) { - a.locker.Lock(id) - defer a.locker.Unlock(id) parents, err := a.getParentLayerPaths(id) if err != nil && !os.IsNotExist(err) { return "", err @@ -432,8 +343,6 @@ func (a *Driver) Get(id, mountLabel string) (string, error) { // Put unmounts and updates list of active mounts. func (a *Driver) Put(id string) error { - a.locker.Lock(id) - defer a.locker.Unlock(id) a.pathCacheLock.Lock() m, exists := a.pathCache[id] if !exists { @@ -452,22 +361,9 @@ func (a *Driver) Put(id string) error { return err } -// isParent returns if the passed in parent is the direct parent of the passed in layer -func (a *Driver) isParent(id, parent string) bool { - parents, _ := getParentIDs(a.rootPath(), id) - if parent == "" && len(parents) > 0 { - return false - } - return !(len(parents) > 0 && parent != parents[0]) -} - // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Diff(id, parent) - } - +func (a *Driver) Diff(id, parent string) (archive.Archive, error) { // AUFS doesn't need the parent layer to produce a diff. 
return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, @@ -477,6 +373,12 @@ func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { }) } +// AdditionalImageStores returns additional image stores supported by the driver +func (a *Driver) AdditionalImageStores() []string { + var imageStores []string + return imageStores +} + type fileGetNilCloser struct { storage.FileGetter } @@ -492,7 +394,7 @@ func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil } -func (a *Driver) applyDiff(id string, diff io.Reader) error { +func (a *Driver) applyDiff(id string, diff archive.Reader) error { return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ UIDMaps: a.uidMaps, GIDMaps: a.gidMaps, @@ -503,9 +405,6 @@ func (a *Driver) applyDiff(id string, diff io.Reader) error { // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (a *Driver) DiffSize(id, parent string) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.DiffSize(id, parent) - } // AUFS doesn't need the parent layer to calculate the diff size. return directory.Size(path.Join(a.rootPath(), "diff", id)) } @@ -513,12 +412,8 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) { // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. -func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.ApplyDiff(id, parent, diff) - } - - // AUFS doesn't need the parent id to apply the diff if it is the direct parent. +func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + // AUFS doesn't need the parent id to apply the diff. if err = a.applyDiff(id, diff); err != nil { return } @@ -529,10 +424,6 @@ func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err e // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Changes(id, parent) - } - // AUFS doesn't have snapshots, so we need to get changes from all parent // layers. 
layers, err := a.getParentLayerPaths(id) @@ -543,7 +434,7 @@ func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { } func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIDs(a.rootPath(), id) + parentIds, err := getParentIds(a.rootPath(), id) if err != nil { return nil, err } @@ -608,7 +499,7 @@ func (a *Driver) Cleanup() error { for _, m := range dirs { if err := a.unmount(m); err != nil { - logrus.Debugf("aufs error unmounting %s: %s", m, err) + logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err) } } return mountpk.Unmount(a.root) @@ -626,34 +517,45 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro offset := 54 if useDirperm() { - offset += len(",dirperm1") + offset += len("dirperm1") } - b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) - index := 0 - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - if bp+len(layer) > len(b) { - break + firstMount := true + i := 0 + + for { + for ; i < len(ro); i++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[i]) + + if firstMount { + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } else { + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } } - bp += copy(b[bp:], layer) - } - opts := "dio,xino=/dev/shm/aufs.xino" - if useDirperm() { - opts += ",dirperm1" - } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) - if err = mount("none", target, "aufs", 0, data); err != nil { - return - } + if firstMount { + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + firstMount = false + } - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) - if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { - return + if i == len(ro) { + break } } diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs_test.go b/vendor/github.com/containers/storage/drivers/aufs/aufs_test.go index be389ac09029..b67b4e3da85c 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs_test.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs_test.go @@ -9,7 +9,6 @@ import ( "io/ioutil" "os" "path" - "path/filepath" "sync" "testing" @@ -18,8 +17,6 @@ import ( "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/stringid" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var ( @@ -43,10 +40,6 @@ func testInit(dir string, t testing.TB) graphdriver.Driver { return d } -func driverGet(d *Driver, id string, mntLabel string) (string, error) { - return d.Get(id, mntLabel) -} - func newDriver(t testing.TB) *Driver { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) @@ -64,7 +57,7 @@ func TestNewDriver(t *testing.T) { d := testInit(tmp, t) defer os.RemoveAll(tmp) if d == nil { - t.Fatal("Driver should not be nil") + t.Fatalf("Driver should not be nil") } } @@ 
-109,7 +102,7 @@ func TestCreateNewDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } } @@ -118,7 +111,7 @@ func TestCreateNewDirStructure(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } @@ -139,7 +132,7 @@ func TestRemoveImage(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } @@ -155,10 +148,7 @@ func TestRemoveImage(t *testing.T) { for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { - t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p) - } - if _, err := os.Stat(path.Join(tmp, p, "1-removing")); err == nil { - t.Fatalf("Error should not be nil because dirs with id 1-removing should be deleted: %s", p) + t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) } } } @@ -167,7 +157,7 @@ func TestGetWithoutParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } @@ -185,15 +175,16 @@ func TestCleanupWithNoDirs(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - err := d.Cleanup() - assert.NoError(t, err) + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } } func TestCleanupWithDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } @@ -206,40 +197,55 @@ func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - err := d.Create("1", "", nil) - require.NoError(t, err) + if err := d.Create("1", "", "", nil); err != nil { + t.Fatal(err) + } response, err := d.mounted(d.getDiffPath("1")) - require.NoError(t, err) - assert.False(t, response) + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } } -func TestMountedTrueResponse(t *testing.T) { +func TestMountedTrueReponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() - err := d.Create("1", "", nil) - require.NoError(t, err) - err = d.Create("2", "1", nil) - require.NoError(t, err) + if err := d.Create("1", "", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", "", nil); err != nil { + t.Fatal(err) + } - _, err = d.Get("2", "") - require.NoError(t, err) + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } response, err := d.mounted(d.pathCache["2"]) - require.NoError(t, err) - assert.True(t, response) + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } } func TestMountWithParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } - if err := d.Create("2", "1", nil); err != nil { + if err := d.Create("2", "1", "", nil); err != nil { t.Fatal(err) } @@ -254,7 +260,7 @@ func TestMountWithParent(t *testing.T) { t.Fatal(err) } if mntPath == "" { - t.Fatal("mntPath should not be nil") + t.Fatal("mntPath should not be empty string") } expected := path.Join(tmp, "mnt", "2") @@ 
-267,10 +273,10 @@ func TestRemoveMountedDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } - if err := d.Create("2", "1", nil); err != nil { + if err := d.Create("2", "1", "", nil); err != nil { t.Fatal(err) } @@ -285,7 +291,7 @@ func TestRemoveMountedDir(t *testing.T) { t.Fatal(err) } if mntPath == "" { - t.Fatal("mntPath should not be empty") + t.Fatal("mntPath should not be empty string") } mounted, err := d.mounted(d.pathCache["2"]) @@ -294,7 +300,7 @@ func TestRemoveMountedDir(t *testing.T) { } if !mounted { - t.Fatal("Dir id 2 should be mounted") + t.Fatalf("Dir id 2 should be mounted") } if err := d.Remove("2"); err != nil { @@ -306,8 +312,8 @@ func TestCreateWithInvalidParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "storage", nil); err == nil { - t.Fatal("Error should not be nil with parent does not exist") + if err := d.Create("1", "storage", "", nil); err == nil { + t.Fatalf("Error should not be nil with parent does not exist") } } @@ -315,11 +321,11 @@ func TestGetDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.CreateReadWrite("1", "", nil); err != nil { + if err := d.CreateReadWrite("1", "", "", nil); err != nil { t.Fatal(err) } - diffPath, err := driverGet(d, "1", "") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -341,7 +347,7 @@ func TestGetDiff(t *testing.T) { t.Fatal(err) } if a == nil { - t.Fatal("Archive should not be nil") + t.Fatalf("Archive should not be nil") } } @@ -349,11 +355,10 @@ func TestChanges(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } - - if err := d.CreateReadWrite("2", "1", nil); err != nil { + if err := d.CreateReadWrite("2", "1", "", nil); err != nil { t.Fatal(err) } @@ -363,7 +368,7 @@ func TestChanges(t *testing.T) { } }() - mntPoint, err := driverGet(d, "2", "") + mntPoint, err := d.Get("2", "") if err != nil { t.Fatal(err) } @@ -399,10 +404,10 @@ func TestChanges(t *testing.T) { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } - if err := d.CreateReadWrite("3", "2", nil); err != nil { + if err := d.CreateReadWrite("3", "2", "", nil); err != nil { t.Fatal(err) } - mntPoint, err = driverGet(d, "3", "") + mntPoint, err = d.Get("3", "") if err != nil { t.Fatal(err) } @@ -420,7 +425,7 @@ func TestChanges(t *testing.T) { t.Fatal(err) } - changes, err = d.Changes("3", "2") + changes, err = d.Changes("3", "") if err != nil { t.Fatal(err) } @@ -444,11 +449,11 @@ func TestDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.CreateReadWrite("1", "", nil); err != nil { + if err := d.CreateReadWrite("1", "", "", nil); err != nil { t.Fatal(err) } - diffPath, err := driverGet(d, "1", "") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -486,11 +491,11 @@ func TestChildDiffSize(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.CreateReadWrite("1", "", nil); err != nil { + if err := d.CreateReadWrite("1", "", "", nil); err != nil { t.Fatal(err) } - diffPath, err := driverGet(d, "1", "") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -522,11 +527,11 @@ func TestChildDiffSize(t *testing.T) { t.Fatalf("Expected size to be %d got %d", size, diffSize) } - if err := d.Create("2", "1", nil); err != nil { + if err := 
d.Create("2", "1", "", nil); err != nil { t.Fatal(err) } - diffSize, err = d.DiffSize("2", "1") + diffSize, err = d.DiffSize("2", "") if err != nil { t.Fatal(err) } @@ -541,12 +546,12 @@ func TestExists(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } if d.Exists("none") { - t.Fatal("id none should not exist in the driver") + t.Fatal("id name should not exist in the driver") } if !d.Exists("1") { @@ -559,13 +564,14 @@ func TestStatus(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", nil); err != nil { + if err := d.Create("1", "", "", nil); err != nil { t.Fatal(err) } status := d.Status() - assert.Len(t, status, 4) - + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } rootDir := status[0] dirs := status[2] if rootDir[0] != "Root Dir" { @@ -587,11 +593,11 @@ func TestApplyDiff(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.CreateReadWrite("1", "", nil); err != nil { + if err := d.CreateReadWrite("1", "", "", nil); err != nil { t.Fatal(err) } - diffPath, err := driverGet(d, "1", "") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -613,10 +619,10 @@ func TestApplyDiff(t *testing.T) { t.Fatal(err) } - if err := d.Create("2", "", nil); err != nil { + if err := d.Create("2", "", "", nil); err != nil { t.Fatal(err) } - if err := d.Create("3", "2", nil); err != nil { + if err := d.Create("3", "2", "", nil); err != nil { t.Fatal(err) } @@ -626,7 +632,7 @@ func TestApplyDiff(t *testing.T) { // Ensure that the file is in the mount point for id 3 - mountPoint, err := driverGet(d, "3", "") + mountPoint, err := d.Get("3", "") if err != nil { t.Fatal(err) } @@ -666,34 +672,48 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { } current = hash(current) - err := d.CreateReadWrite(current, parent, nil) - require.NoError(t, err, "current layer %d", i) - - point, err := driverGet(d, current, "") - require.NoError(t, err, "current layer %d", i) - + if err := d.CreateReadWrite(current, parent, "", nil); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } f, err := os.Create(path.Join(point, current)) - require.NoError(t, err, "current layer %d", i) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } f.Close() if i%10 == 0 { - err := os.Remove(path.Join(point, parent)) - require.NoError(t, err, "current layer %d", i) + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } expected-- } last = current } // Perform the actual mount for the top most image - point, err := driverGet(d, last, "") - require.NoError(t, err) + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } files, err := ioutil.ReadDir(point) - require.NoError(t, err) - assert.Len(t, files, expected) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got %d", expected, len(files)) + } } func TestMountMoreThan42Layers(t *testing.T) { - defer os.RemoveAll(tmpOuter) + os.RemoveAll(tmpOuter) testMountMoreThan42Layers(t, tmp) } @@ -724,25 +744,25 @@ func BenchmarkConcurrentAccess(b *testing.B) { defer os.RemoveAll(tmp) defer d.Cleanup() - numConcurrent := 256 + numConcurent := 256 // create a bunch of ids var ids []string - for i := 0; i < numConcurrent; 
i++ { + for i := 0; i < numConcurrent; i++ { ids = append(ids, stringid.GenerateNonCryptoID()) } - if err := d.Create(ids[0], "", nil); err != nil { + if err := d.Create(ids[0], "", "", nil); err != nil { b.Fatal(err) } - if err := d.Create(ids[1], ids[0], nil); err != nil { + if err := d.Create(ids[1], ids[0], "", nil); err != nil { b.Fatal(err) } parent := ids[1] ids = append(ids[2:]) - chErr := make(chan error, numConcurrent) + chErr := make(chan error, numConcurrent) var outerGroup sync.WaitGroup outerGroup.Add(len(ids)) b.StartTimer() @@ -751,7 +771,7 @@ func BenchmarkConcurrentAccess(b *testing.B) { for _, id := range ids { go func(id string) { defer outerGroup.Done() - if err := d.Create(id, parent, nil); err != nil { + if err := d.Create(id, parent, "", nil); err != nil { b.Logf("Create %s failed", id) chErr <- err return @@ -780,23 +800,3 @@ func BenchmarkConcurrentAccess(b *testing.B) { } } } - -func TestInitStaleCleanup(t *testing.T) { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - for _, d := range []string{"diff", "mnt"} { - if err := os.MkdirAll(filepath.Join(tmp, d, "123-removing"), 0755); err != nil { - t.Fatal(err) - } - } - - testInit(tmp, t) - for _, d := range []string{"diff", "mnt"} { - if _, err := os.Stat(filepath.Join(tmp, d, "123-removing")); err == nil { - t.Fatal("cleanup failed") - } - } -} diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go index d2325fc46cdd..eb298d9eebdc 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -29,7 +29,7 @@ func loadIds(root string) ([]string, error) { // // If there are no lines in the file then the id has no parent // and an empty slice is returned. -func getParentIDs(root, id string) ([]string, error) { +func getParentIds(root, id string) ([]string, error) { f, err := os.Open(path.Join(root, "layers", id)) if err != nil { return nil, err diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go index 100e7537a9c2..8314f142bd80 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -4,9 +4,9 @@ package aufs import ( "os/exec" + "syscall" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) // Unmount the target specified. 
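Here the aufs helpers swap golang.org/x/sys/unix back for the standard library's syscall package. On Linux the two packages expose Mount and Unmount with identical signatures, so the substitution is mechanical; a minimal sketch of the syscall form (remountReadOnly and its target path are illustrative, not taken from the patch):

package main

import (
	"log"
	"syscall"
)

// remountReadOnly flips an existing mount point to read-only using the
// same syscall.Mount shape the reverted wrapper relies on.
func remountReadOnly(target string) error {
	return syscall.Mount("", target, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, "")
}

func main() {
	if err := remountReadOnly("/mnt/example"); err != nil {
		log.Fatal(err)
	}
}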
@@ -14,7 +14,7 @@ func Unmount(target string) error { if err := exec.Command("auplink", target, "flush").Run(); err != nil { logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) } - if err := unix.Unmount(target, 0); err != nil { + if err := syscall.Unmount(target, 0); err != nil { return err } return nil diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go index 937104ba3fd0..8062bae420d0 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -1,7 +1,7 @@ package aufs -import "golang.org/x/sys/unix" +import "syscall" func mount(source string, target string, fstype string, flags uintptr, data string) error { - return unix.Mount(source, target, fstype, flags, data) + return syscall.Mount(source, target, fstype, flags, data) } diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go index d030b0663788..c807902df9aa 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -2,7 +2,7 @@ package aufs -import "errors" +import "github.com/pkg/errors" // MsRemount declared to specify a non-linux system mount. const MsRemount = 0 diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index abc856c836a4..9e16f89457ce 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -16,32 +16,31 @@ import "C" import ( "fmt" - "io/ioutil" - "math" "os" "path" "path/filepath" - "strconv" "strings" - "sync" + "syscall" "unsafe" "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/system" "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) func init() { graphdriver.Register("btrfs", Init) } +var ( + quotaEnabled = false + userDiskQuota = false +) + type btrfsOptions struct { minSpace uint64 size uint64 @@ -72,11 +71,18 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - opt, userDiskQuota, err := parseOptions(options) + opt, err := parseOptions(options) if err != nil { return nil, err } + if userDiskQuota { + if err := subvolEnableQuota(home); err != nil { + return nil, err + } + quotaEnabled = true + } + driver := &Driver{ home: home, uidMaps: uidMaps, @@ -84,48 +90,39 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap options: opt, } - if userDiskQuota { - if err := driver.subvolEnableQuota(); err != nil { - return nil, err - } - } - return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil } -func parseOptions(opt []string) (btrfsOptions, bool, error) { +func parseOptions(opt []string) (btrfsOptions, error) { var options btrfsOptions - userDiskQuota := false for _, option := range opt { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { - return options, userDiskQuota, err + return options, err } key = strings.ToLower(key) switch key { case "btrfs.min_space": minSpace, 
err := units.RAMInBytes(val) if err != nil { - return options, userDiskQuota, err + return options, err } userDiskQuota = true options.minSpace = uint64(minSpace) default: - return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) + return options, fmt.Errorf("Unknown option %s", key) } } - return options, userDiskQuota, nil + return options, nil } // Driver contains information about the filesystem mounted. type Driver struct { //root of the file system - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - options btrfsOptions - quotaEnabled bool - once sync.Once + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions } // String prints the name of the driver (btrfs). @@ -154,8 +151,10 @@ func (d *Driver) Metadata(id string) (map[string]string, error) { // Cleanup unmounts the home directory. func (d *Driver) Cleanup() error { - if err := d.subvolDisableQuota(); err != nil { - return err + if quotaEnabled { + if err := subvolDisableQuota(d.home); err != nil { + return err + } } return mount.Unmount(d.home) @@ -198,7 +197,7 @@ func subvolCreate(path, name string) error { args.name[i] = C.char(c) } - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) @@ -226,7 +225,7 @@ func subvolSnapshot(src, dest, name string) error { C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) C.free(unsafe.Pointer(cs)) - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) @@ -235,8 +234,8 @@ func subvolSnapshot(src, dest, name string) error { } func isSubvolume(p string) (bool, error) { - var bufStat unix.Stat_t - if err := unix.Lstat(p, &bufStat); err != nil { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { return false, err } @@ -244,7 +243,7 @@ func isSubvolume(p string) (bool, error) { return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil } -func subvolDelete(dirpath, name string, quotaEnabled bool) error { +func subvolDelete(dirpath, name string) error { dir, err := openDir(dirpath) if err != nil { return err @@ -272,7 +271,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } @@ -283,27 +282,12 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } - if quotaEnabled { - if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { - var args C.struct_btrfs_ioctl_qgroup_create_args - args.qgroupid = C.__u64(qgroupid) - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) - } - } else { - 
logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) - } - } - // all subvolumes have been removed // now remove the one originally passed in for i, c := range []byte(name) { args.name[i] = C.char(c) } - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) @@ -311,27 +295,8 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { return nil } -func (d *Driver) updateQuotaStatus() { - d.once.Do(func() { - if !d.quotaEnabled { - // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed - if err := subvolQgroupStatus(d.home); err != nil { - // quota is still not enabled - return - } - d.quotaEnabled = true - } - }) -} - -func (d *Driver) subvolEnableQuota() error { - d.updateQuotaStatus() - - if d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) +func subvolEnableQuota(path string) error { + dir, err := openDir(path) if err != nil { return err } @@ -339,25 +304,17 @@ func (d *Driver) subvolEnableQuota() error { var args C.struct_btrfs_ioctl_quota_ctl_args args.cmd = C.BTRFS_QUOTA_CTL_ENABLE - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) } - d.quotaEnabled = true - return nil } -func (d *Driver) subvolDisableQuota() error { - d.updateQuotaStatus() - - if !d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) +func subvolDisableQuota(path string) error { + dir, err := openDir(path) if err != nil { return err } @@ -365,32 +322,24 @@ func (d *Driver) subvolDisableQuota() error { var args C.struct_btrfs_ioctl_quota_ctl_args args.cmd = C.BTRFS_QUOTA_CTL_DISABLE - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) } - d.quotaEnabled = false - return nil } -func (d *Driver) subvolRescanQuota() error { - d.updateQuotaStatus() - - if !d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) +func subvolRescanQuota(path string) error { + dir, err := openDir(path) if err != nil { return err } defer closeDir(dir) var args C.struct_btrfs_ioctl_quota_rescan_args - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) @@ -409,7 +358,7 @@ func subvolLimitQgroup(path string, size uint64) error { var args C.struct_btrfs_ioctl_qgroup_limit_args args.lim.max_referenced = C.__u64(size) args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to limit 
qgroup for %s: %v", dir, errno.Error()) @@ -418,60 +367,6 @@ func subvolLimitQgroup(path string, size uint64) error { return nil } -// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path -// with search key of BTRFS_QGROUP_STATUS_KEY. -// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. -// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 -func subvolQgroupStatus(path string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_search_args - args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID - args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY - args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY - args.key.max_objectid = C.__u64(math.MaxUint64) - args.key.max_offset = C.__u64(math.MaxUint64) - args.key.max_transid = C.__u64(math.MaxUint64) - args.key.nr_items = 4096 - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) - } - sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) - if sh._type != C.BTRFS_QGROUP_STATUS_KEY { - return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) - } - return nil -} - -func subvolLookupQgroup(path string) (uint64, error) { - dir, err := openDir(path) - if err != nil { - return 0, err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_ino_lookup_args - args.objectid = C.BTRFS_FIRST_FREE_OBJECTID - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) - } - if args.treeid == 0 { - return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) - } - - return uint64(args.treeid), nil -} - func (d *Driver) subvolumesDir() string { return path.Join(d.home, "subvolumes") } @@ -480,23 +375,14 @@ func (d *Driver) subvolumesDirID(id string) string { return path.Join(d.subvolumesDir(), id) } -func (d *Driver) quotasDir() string { - return path.Join(d.home, "quotas") -} - -func (d *Driver) quotasDirID(id string) string { - return path.Join(d.quotasDir(), id) -} - // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) } // Create the filesystem with given id. 
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - quotas := path.Join(d.home, "quotas") +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { subvolumes := path.Join(d.home, "subvolumes") rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { @@ -523,26 +409,14 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { } } - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - if _, ok := storageOpt["size"]; ok { driver := &Driver{} if err := d.parseStorageOpt(storageOpt, driver); err != nil { return err } - if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { return err } - if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { - return err - } - if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { - return err - } } // if we have a remapped root (user namespaces enabled), change the created snapshot @@ -553,11 +427,6 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { } } - mountLabel := "" - if opts != nil { - mountLabel = opts.MountLabel - } - return label.Relabel(path.Join(subvolumes, id), mountLabel, false) } @@ -590,8 +459,11 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error { return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) } - if err := d.subvolEnableQuota(); err != nil { - return err + if !quotaEnabled { + if err := subvolEnableQuota(d.home); err != nil { + return err + } + quotaEnabled = true } if err := subvolLimitQgroup(dir, driver.options.size); err != nil { @@ -607,25 +479,13 @@ func (d *Driver) Remove(id string) error { if _, err := os.Stat(dir); err != nil { return err } - quotasDir := d.quotasDirID(id) - if _, err := os.Stat(quotasDir); err == nil { - if err := os.Remove(quotasDir); err != nil { - return err - } - } else if !os.IsNotExist(err) { - return err - } - - // Call updateQuotaStatus() to invoke status update - d.updateQuotaStatus() - - if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + if err := subvolDelete(d.subvolumesDir(), id); err != nil { return err } - if err := system.EnsureRemoveAll(dir); err != nil { + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } - if err := d.subvolRescanQuota(); err != nil { + if err := subvolRescanQuota(d.home); err != nil { return err } return nil @@ -643,17 +503,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { return "", fmt.Errorf("%s: not a directory", dir) } - if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { - if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { - if err := d.subvolEnableQuota(); err != nil { - return "", err - } - if err := subvolLimitQgroup(dir, size); err != nil { - return "", err - } - } - } - return dir, nil } @@ -673,5 +522,6 @@ func (d *Driver) Exists(id string) bool { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - return nil + var imageStores []string + return imageStores } diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs_test.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs_test.go index f8b5e15f4894..b6372f81bce6 100644 --- 
a/vendor/github.com/containers/storage/drivers/btrfs/btrfs_test.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs_test.go @@ -30,7 +30,7 @@ func TestBtrfsCreateSnap(t *testing.T) { func TestBtrfsSubvolDelete(t *testing.T) { d := graphtest.GetDriver(t, "btrfs") - if err := d.CreateReadWrite("test", "", nil); err != nil { + if err := d.CreateReadWrite("test", "", "", nil); err != nil { t.Fatal(err) } defer graphtest.PutDriver(t) diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_test.go b/vendor/github.com/containers/storage/drivers/btrfs/version_test.go index d78d57717934..15a6e75cb356 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/version_test.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/version_test.go @@ -8,6 +8,6 @@ import ( func TestLibVersion(t *testing.T) { if btrfsLibVersion() <= 0 { - t.Error("expected output from btrfs lib version > 0") + t.Errorf("expected output from btrfs lib version > 0") } } diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go index 72551a38d469..5ea604f5b6d4 100644 --- a/vendor/github.com/containers/storage/drivers/counter.go +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -22,21 +22,30 @@ func NewRefCounter(c Checker) *RefCounter { } } -// Increment increases the ref count for the given id and returns the current count +// Increment increases the ref count for the given id and returns the current count func (c *RefCounter) Increment(path string) int { - return c.incdec(path, func(minfo *minfo) { - minfo.count++ - }) + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time, check whether it was + // already mounted on the system, so that the ref count is correct for a + // mount that is already in use. 
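// An illustrative aside, not part of the patch: the first-use seeding that
// follows can be read as a standalone helper (mounted stands in for
// c.checker.IsMounted(path)):
//
//	func seed(m *minfo, mounted bool) {
//		if !m.check {
//			m.check = true
//			if mounted {
//				m.count++ // an already-mounted path starts counting from 1
//			}
//		}
//	}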
+ if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count++ + c.mu.Unlock() + return m.count } // Decrement decreases the ref count for the given id and returns the current count func (c *RefCounter) Decrement(path string) int { - return c.incdec(path, func(minfo *minfo) { - minfo.count-- - }) -} - -func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { c.mu.Lock() m := c.counts[path] if m == nil { @@ -52,8 +61,7 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { m.count++ } } - infoOp(m) - count := m.count + m.count-- c.mu.Unlock() - return count + return m.count } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go deleted file mode 100644 index 1430c8859c1d..000000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ /dev/null @@ -1,236 +0,0 @@ -package devmapper - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type directLVMConfig struct { - Device string - ThinpPercent uint64 - ThinpMetaPercent uint64 - AutoExtendPercent uint64 - AutoExtendThreshold uint64 -} - -var ( - errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") - errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") - errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm") -) - -func validateLVMConfig(cfg directLVMConfig) error { - if reflect.DeepEqual(cfg, directLVMConfig{}) { - return nil - } - if cfg.Device == "" { - return errMissingSetupDevice - } - if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { - return errThinpPercentMissing - } - - if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { - return errThinpPercentTooBig - } - return nil -} - -func checkDevAvailable(dev string) error { - lvmScan, err := exec.LookPath("lvmdiskscan") - if err != nil { - logrus.Debug("could not find lvmdiskscan") - return nil - } - - out, err := exec.Command(lvmScan).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - if !bytes.Contains(out, []byte(dev)) { - return errors.Errorf("%s is not available for use with devicemapper", dev) - } - return nil -} - -func checkDevInVG(dev string) error { - pvDisplay, err := exec.LookPath("pvdisplay") - if err != nil { - logrus.Debug("could not find pvdisplay") - return nil - } - - out, err := exec.Command(pvDisplay, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) - for scanner.Scan() { - fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") - if len(fields) > 1 { - // got "VG Name" line" - vg := strings.TrimSpace(fields[1]) - if len(vg) > 0 { - return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) - } - logrus.Error(fields) - break - } - } - return nil -} - -func checkDevHasFS(dev string) error { - blkid, err := exec.LookPath("blkid") - if err != nil { - logrus.Debug("could not find blkid") - return nil - } 
- - out, err := exec.Command(blkid, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - fields := bytes.Fields(out) - for _, f := range fields { - kv := bytes.Split(f, []byte{'='}) - if bytes.Equal(kv[0], []byte("TYPE")) { - v := bytes.Trim(kv[1], "\"") - if len(v) > 0 { - return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) - } - return nil - } - } - return nil -} - -func verifyBlockDevice(dev string, force bool) error { - if err := checkDevAvailable(dev); err != nil { - return err - } - if err := checkDevInVG(dev); err != nil { - return err - } - - if force { - return nil - } - - if err := checkDevHasFS(dev); err != nil { - return err - } - return nil -} - -func readLVMConfig(root string) (directLVMConfig, error) { - var cfg directLVMConfig - - p := filepath.Join(root, "setup-config.json") - b, err := ioutil.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - return cfg, nil - } - return cfg, errors.Wrap(err, "error reading existing setup config") - } - - // check if this is just an empty file, no need to produce a json error later if so - if len(b) == 0 { - return cfg, nil - } - - err = json.Unmarshal(b, &cfg) - return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") -} - -func writeLVMConfig(root string, cfg directLVMConfig) error { - p := filepath.Join(root, "setup-config.json") - b, err := json.Marshal(cfg) - if err != nil { - return errors.Wrap(err, "error marshalling direct lvm config") - } - err = ioutil.WriteFile(p, b, 0600) - return errors.Wrap(err, "error writing direct lvm config to file") -} - -func setupDirectLVM(cfg directLVMConfig) error { - lvmProfileDir := "/etc/lvm/profile" - binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} - - for _, bin := range binaries { - if _, err := exec.LookPath(bin); err != nil { - return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") - } - } - - err := os.MkdirAll(lvmProfileDir, 0755) - if err != nil { - return errors.Wrap(err, "error creating lvm profile directory") - } - - if cfg.AutoExtendPercent == 0 { - cfg.AutoExtendPercent = 20 - } - - if cfg.AutoExtendThreshold == 0 { - cfg.AutoExtendThreshold = 80 - } - - if cfg.ThinpPercent == 0 { - cfg.ThinpPercent = 95 - } - if cfg.ThinpMetaPercent == 0 { - cfg.ThinpMetaPercent = 1 - } - - out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", 
cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) - if err != nil { - return errors.Wrap(err, "error writing storage thinp autoextend profile") - } - - out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() - return errors.Wrap(err, string(out)) -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index 6db7b2b2c9fa..2608c49f4b41 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -12,41 +12,44 @@ import ( "os/exec" "path" "path/filepath" - "reflect" "strconv" "strings" "sync" + "syscall" "time" + "github.com/sirupsen/logrus" + "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/devicemapper" - "github.com/containers/storage/pkg/dmesg" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/loopback" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/parsers/kernel" - units "github.com/docker/go-units" + "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + // We retry device removal so many times that even error messages + // will fill up the console during normal operation. So only log Fatal + // messages by default. + logLevel = devicemapper.LogLevelFatal driverDeferredRemovalSupport = false enableDeferredRemoval = false enableDeferredDeletion = false userBaseSize = false defaultMinFreeSpacePercent uint32 = 10 - lvmSetupConfigForce bool ) const deviceSetMetaFile string = "deviceset-metadata" @@ -119,8 +122,6 @@ type DeviceSet struct { uidMaps []idtools.IDMap gidMaps []idtools.IDMap minFreeSpacePercent uint32 //min free space percentage in thinpool - xfsNospaceRetries string // max retries when xfs receives ENOSPC - lvmSetupConfig directLVMConfig } // DiskUsage contains information about disk usage and is used when reporting Status of a device. @@ -169,7 +170,7 @@ type Status struct { MinFreeSpace uint64 } -// Structure used to export image/container metadata in inspect. +// Structure used to export image/container metadata in docker inspect. 
type deviceMetadata struct { deviceID int deviceSize uint64 // size in bytes @@ -378,7 +379,10 @@ func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { var mask byte i := deviceID % 8 mask = (1 << uint(i)) - return (devices.deviceIDMap[deviceID/8] & mask) == 0 + if (devices.deviceIDMap[deviceID/8] & mask) != 0 { + return false + } + return true } // Should be called with devices.Lock() held. @@ -405,8 +409,8 @@ func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { // This function relies on that device hash map has been loaded in advance. // Should be called with devices.Lock() held. func (devices *DeviceSet) constructDeviceIDMap() { - logrus.Debug("devmapper: constructDeviceIDMap()") - defer logrus.Debug("devmapper: constructDeviceIDMap() END") + logrus.Debugf("devmapper: constructDeviceIDMap()") + defer logrus.Debugf("devmapper: constructDeviceIDMap() END") for _, info := range devices.Devices { devices.markDeviceIDUsed(info.DeviceID) @@ -454,8 +458,8 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) } func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.Debug("devmapper: loadDeviceFilesOnStart()") - defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + logrus.Debugf("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") var scan = func(path string, info os.FileInfo, err error) error { if err != nil { @@ -475,10 +479,11 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { } // Should be called with devices.Lock() held. -func (devices *DeviceSet) unregisterDevice(hash string) error { - logrus.Debugf("devmapper: unregisterDevice(%v)", hash) +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) info := &devInfo{ - Hash: hash, + Hash: hash, + DeviceID: id, } delete(devices.Devices, hash) @@ -523,7 +528,7 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo // Make sure deferred removal on device is canceled, if one was // scheduled. - if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + if err := devices.cancelDeferredRemoval(info); err != nil { return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) } @@ -534,11 +539,11 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) } -// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error -func xfsSupported() error { +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { // Make sure mkfs.xfs is available if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return err // error text is descriptive enough + return false } // Check if kernel supports xfs filesystem or not. 
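Both halves of the probe survive the revert: mkfs.xfs must be on PATH and /proc/filesystems must list xfs; only the reporting changes from a wrapped error to a bare bool, as the next hunk shows. Condensed into a self-contained sketch (xfsAvailable is an illustrative name, not from the patch):

package main

import (
	"bufio"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func xfsAvailable() bool {
	// mkfs.xfs must be installed to format new devices.
	if _, err := exec.LookPath("mkfs.xfs"); err != nil {
		return false
	}
	// The kernel lists every supported filesystem in /proc/filesystems,
	// one per line, e.g. "nodev\tsysfs" or "\txfs".
	f, err := os.Open("/proc/filesystems")
	if err != nil {
		return false
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		if strings.HasSuffix(s.Text(), "\txfs") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println("xfs available:", xfsAvailable())
}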
@@ -546,48 +551,43 @@ func xfsSupported() error { f, err := os.Open("/proc/filesystems") if err != nil { - return errors.Wrapf(err, "error checking for xfs support") + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), "\txfs") { - return nil + return true } } if err := s.Err(); err != nil { - return errors.Wrapf(err, "error checking for xfs support") + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) } - - return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) + return false } func determineDefaultFS() string { - err := xfsSupported() - if err == nil { + if xfsSupported() { return "xfs" } - logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err) + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") return "ext4" } -// mkfsOptions tries to figure out whether some additional mkfs options are required -func mkfsOptions(fs string) []string { - if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) { - // For kernels earlier than 3.16 (and newer xfsutils), - // some xfs features need to be explicitly disabled. - return []string{"-m", "crc=0,finobt=0"} - } - - return []string{} -} - func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { devname := info.DevName() + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + if devices.filesystem == "" { devices.filesystem = determineDefaultFS() } @@ -595,11 +595,7 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { return err } - args := mkfsOptions(devices.filesystem) - args = append(args, devices.mkfsArgs...) - args = append(args, devname) - - logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) defer func() { if err != nil { logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) @@ -837,7 +833,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { } if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) + devices.unregisterDevice(deviceID, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return nil, err @@ -845,57 +841,11 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { return info, nil } -func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { - var ( - devinfo *devicemapper.Info - err error - ) - - if err = devices.poolHasFreeSpace(); err != nil { - return err - } - - if devices.deferredRemove { - devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) - if err != nil { - return err - } - if devinfo != nil && devinfo.DeferredRemove != 0 { - err = devices.cancelDeferredRemoval(baseInfo) - if err != nil { - // If Error is ErrEnxio. Device is probably already gone. Continue. 
- if errors.Cause(err) != devicemapper.ErrEnxio { - return err - } - devinfo = nil - } else { - defer devices.deactivateDevice(baseInfo) - } - } - } else { - devinfo, err = devicemapper.GetInfo(baseInfo.Name()) - if err != nil { - return err - } - } - - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { - return err - } - defer devicemapper.ResumeDevice(baseInfo.Name()) - } - - if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + if err := devices.poolHasFreeSpace(); err != nil { return err } - return nil -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { deviceID, err := devices.getNextFreeDeviceID() if err != nil { return err @@ -908,7 +858,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf } for { - if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { if devicemapper.DeviceIDExists(err) { // Device ID already exists. This should not // happen. Now we have a mechanism to find @@ -938,7 +888,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf } if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) + devices.unregisterDevice(deviceID, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return err @@ -1184,7 +1134,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { defer devices.deactivateDevice(info) - fsMountPoint := "/run/containers/storage/mnt" + fsMountPoint := "/run/containers/mnt" if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { if err := os.MkdirAll(fsMountPoint, 0700); err != nil { return err @@ -1200,10 +1150,10 @@ func (devices *DeviceSet) growFS(info *devInfo) error { options = joinMountOptions(options, devices.mountOptions) if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256))) + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) } - defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) switch devices.BaseDeviceFilesystem { case "ext4": @@ -1266,18 +1216,39 @@ func (devices *DeviceSet) setupBaseImage() error { } func setCloseOnExec(name string) { - fileInfos, _ := ioutil.ReadDir("/proc/self/fd") - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - unix.CloseOnExec(fd) + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } } } } } +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. 
+ // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + func major(device uint64) uint64 { return (device >> 8) & 0xfff } @@ -1385,7 +1356,10 @@ func (devices *DeviceSet) saveTransactionMetaData() error { } func (devices *DeviceSet) removeTransactionMetaData() error { - return os.RemoveAll(devices.transactionMetaFile()) + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil } func (devices *DeviceSet) rollbackTransaction() error { @@ -1490,9 +1464,12 @@ func (devices *DeviceSet) closeTransaction() error { } func determineDriverCapabilities(version string) error { - // Kernel driver version >= 4.27.0 support deferred removal + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ - logrus.Debugf("devicemapper: kernel dm driver version is %s", version) + logrus.Debugf("devicemapper: driver version is %s", version) versionSplit := strings.Split(version, ".") major, err := strconv.Atoi(versionSplit[0]) @@ -1528,13 +1505,12 @@ func determineDriverCapabilities(version string) error { // Determine the major and minor number of loopback device func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - var stat unix.Stat_t - err := unix.Stat(file.Name(), &stat) + stat, err := file.Stat() if err != nil { return 0, 0, err } - dev := stat.Rdev + dev := stat.Sys().(*syscall.Stat_t).Rdev majorNum := major(dev) minorNum := minor(dev) @@ -1672,19 +1648,36 @@ func (devices *DeviceSet) enableDeferredRemovalDeletion() error { return nil } -func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { +func (devices *DeviceSet) initDevmapper(doInit bool) error { + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine version of device mapper") + } + + if err := determineDriverCapabilities(version); err != nil { + return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine device mapper driver capabilities") + } + if err := devices.enableDeferredRemovalDeletion(); err != nil { return err } // https://github.com/docker/docker/issues/4036 - if supported := devicemapper.UdevSetSyncSupport(true); !supported { - logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") - - if !devices.overrideUdevSyncCheck { - return graphdriver.ErrNotSupported - } - } + // if supported := devicemapper.UdevSetSyncSupport(true); !supported { + // if storageversion.IAmStatic == "true" { + // logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. 
Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + // } else { + // logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + // } + // + // if !devices.overrideUdevSyncCheck { + // return graphdriver.ErrNotSupported + // } + // } //create the root dir of the devmapper driver ownership to match this //daemon's remapped root uid/gid so containers can start properly @@ -1699,47 +1692,20 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { return err } - prevSetupConfig, err := readLVMConfig(devices.root) - if err != nil { - return err - } - - if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { - if devices.thinPoolDevice != "" { - return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") - } + // Set the device prefix from the device id and inode of the container root dir - if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { - if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { - return errors.New("changing direct-lvm config is not supported") - } - logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") - if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { - return err - } - if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { - return err - } - if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { - return err - } - } - devices.thinPoolDevice = "storage-thinpool" - logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) - } - - // Set the device prefix from the device id and inode of the storage root dir - var st unix.Stat_t - if err := unix.Stat(devices.root, &st); err != nil { + st, err := os.Stat(devices.root) + if err != nil { return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) } + sysSt := st.Sys().(*syscall.Stat_t) // "reg-" stands for "regular file". // In the future we might use "dev-" for "device file", etc. // container-maj,min[-inode] stands for: // - Managed by container storage // - The target of this device is at major <maj> and minor <min> // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. 
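// For reference only (not in the patch): the prefix built below combines the
// major/minor of the root directory's device with its inode; the bit layout
// matches the major() helper shown earlier and the classic glibc dev_t
// encoding:
//
//	func majorMinor(dev uint64) (uint64, uint64) {
//		return (dev >> 8) & 0xfff, (dev & 0xff) | ((dev >> 12) & 0xfff00)
//	}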
- devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino) + devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) // Check for the existence of the thin-pool device @@ -1782,7 +1748,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { hasData := devices.hasImage("data") if !doInit && !hasData { - return errors.New("loopback data file not found") + return errors.New("Loopback data file not found") } if !hasData { @@ -1815,7 +1781,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { hasMetadata := devices.hasImage("metadata") if !doInit && !hasMetadata { - return errors.New("loopback metadata file not found") + return errors.New("Loopback metadata file not found") } if !hasMetadata { @@ -1845,14 +1811,6 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } - defer func() { - if retErr != nil { - err = devices.deactivatePool() - if err != nil { - logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) - } - } - }() } // Pool already exists and caller did not pass us a pool. That means @@ -1899,8 +1857,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { // AddDevice adds a device and registers in the hash. func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) // If a deleted device exists, return error. baseInfo, err := devices.lookupDeviceWithLock(baseHash) @@ -1937,7 +1895,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) } - if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + if err := devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { return err } @@ -2017,7 +1975,7 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro } if err == nil { - if err := devices.unregisterDevice(info.Hash); err != nil { + if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { return err } // If device was already in deferred delete state that means @@ -2038,8 +1996,8 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro // Issue discard only if device open count is zero. func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) - defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) + logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash) + defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash) // This is a workaround for the kernel not discarding blocks // on the thin pool when we remove a thinp device, so we do it // manually. 
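The deactivation hunks below restore an inline cancel loop: devicemapper.CancelDeferredRemove is attempted up to 100 times, EBUSY is treated as transient (the lock is dropped, the loop sleeps 100ms and retries), and ENXIO means the device is already gone and counts as success. The same pattern in isolation, with illustrative stand-ins for the devicemapper errors (the sketch omits the Unlock/Lock juggling around the sleep):

package main

import (
	"errors"
	"fmt"
	"time"
)

var (
	errBusy  = errors.New("device busy")    // stand-in for devicemapper.ErrBusy
	errEnxio = errors.New("no such device") // stand-in for devicemapper.ErrEnxio
)

// retryBusy retries op while it reports EBUSY, treating ENXIO as success.
func retryBusy(op func() error, attempts int, delay time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		if errors.Is(err, errEnxio) {
			return nil // device already gone: nothing left to cancel
		}
		if !errors.Is(err, errBusy) {
			return err // permanent failure
		}
		time.Sleep(delay) // transient EBUSY: back off and try again
	}
	return err
}

func main() {
	calls := 0
	err := retryBusy(func() error {
		if calls++; calls < 3 {
			return errBusy
		}
		return nil
	}, 100, 100*time.Millisecond)
	fmt.Println("calls:", calls, "err:", err)
}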
@@ -2072,16 +2030,7 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { } // Try to deactivate device in case it is active. - // If deferred removal is enabled and deferred deletion is disabled - // then make sure device is removed synchronously. There have been - // some cases of device being busy for short duration and we would - // rather busy wait for device removal to take care of these cases. - deferredRemove := devices.deferredRemove - if !devices.deferredDelete { - deferredRemove = false - } - - if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil { + if err := devices.deactivateDevice(info); err != nil { logrus.Debugf("devmapper: Error deactivating device: %s", err) return err } @@ -2097,8 +2046,8 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { // removal. If one wants to override that and want DeleteDevice() to fail if // device was busy and could not be deleted, set syncDelete=true. func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { - logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) - defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) + logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete) info, err := devices.lookupDeviceWithLock(hash) if err != nil { return err @@ -2114,8 +2063,8 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { } func (devices *DeviceSet) deactivatePool() error { - logrus.Debug("devmapper: deactivatePool() START") - defer logrus.Debug("devmapper: deactivatePool() END") + logrus.Debug("devmapper: deactivatePool()") + defer logrus.Debug("devmapper: deactivatePool END") devname := devices.getPoolDevName() devinfo, err := devicemapper.GetInfo(devname) @@ -2138,12 +2087,7 @@ func (devices *DeviceSet) deactivatePool() error { } func (devices *DeviceSet) deactivateDevice(info *devInfo) error { - return devices.deactivateDeviceMode(info, devices.deferredRemove) -} - -func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error { - var err error - logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) + logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash) defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) devinfo, err := devicemapper.GetInfo(info.Name()) @@ -2155,17 +2099,14 @@ func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove boo return nil } - if deferredRemove { - err = devicemapper.RemoveDeviceDeferred(info.Name()) + if devices.deferredRemove { + if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { + return err + } } else { - err = devices.removeDevice(info.Name()) - } - - // This function's semantics is such that it does not return an - // error if device does not exist. So if device went away by - // the time we actually tried to remove it, do not return error. 
- if errors.Cause(err) != devicemapper.ErrEnxio {
- return err
+ if err := devices.removeDevice(info.Name()); err != nil {
+ return err
+ }
 }
 return nil
}
@@ -2196,53 +2137,41 @@ func (devices *DeviceSet) removeDevice(devname string) error {
 return err
}
-func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
 if !devices.deferredRemove {
 return nil
 }
- logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
- defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
+ logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+ defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
 devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
- if err != nil {
- return err
- }
 if devinfo != nil && devinfo.DeferredRemove == 0 {
 return nil
 }
 // Cancel deferred remove
- if err := devices.cancelDeferredRemoval(info); err != nil {
- // If Error is ErrEnxio. Device is probably already gone. Continue.
- if errors.Cause(err) != devicemapper.ErrBusy {
- return err
+ for i := 0; i < 100; i++ {
+ err = devicemapper.CancelDeferredRemove(info.Name())
+ if err == nil {
+ break
 }
- }
- return nil
-}
-func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
- logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
- defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
-
- var err error
+ if errors.Cause(err) == devicemapper.ErrEnxio {
+ // Device is probably already gone. Return success.
+ return nil
+ }
- // Cancel deferred remove
- for i := 0; i < 100; i++ {
- err = devicemapper.CancelDeferredRemove(info.Name())
- if err != nil {
- if errors.Cause(err) != devicemapper.ErrBusy {
- // If we see EBUSY it may be a transient error,
- // sleep a bit a retry a few times.
- devices.Unlock()
- time.Sleep(100 * time.Millisecond)
- devices.Lock()
- continue
- }
+ if errors.Cause(err) != devicemapper.ErrBusy {
+ return err
 }
- break
+
+ // If we see EBUSY it may be a transient error,
+ // sleep a bit a retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
 }
 return err
}
@@ -2280,6 +2209,9 @@ func (devices *DeviceSet) Shutdown(home string) error {
 if err != nil {
 return err
 }
+ if p == path.Join(home, "mnt") {
+ return nil
+ }
 if !info.IsDir() {
 return nil
 }
@@ -2288,7 +2220,7 @@ func (devices *DeviceSet) Shutdown(home string) error {
 // We use MNT_DETACH here in case it is still busy in some running
 // container. This means it'll go away from the global scope directly,
 // and the device will be released when that container dies.
- if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
+ if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
 logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
 }
 }
@@ -2331,34 +2263,6 @@ func (devices *DeviceSet) Shutdown(home string) error {
 return nil
}
-// Recent XFS changes allow changing behavior of filesystem in case of errors.
-// When thin pool gets full and XFS gets ENOSPC error, currently it tries
-// IO infinitely and sometimes it can block the container process
-// and process can't be killed. With 0 value, XFS will not retry upon error
-// and instead will shutdown filesystem.
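Editor's aside: the rewritten `cancelDeferredRemoval` above handles EBUSY by dropping the DeviceSet lock, sleeping briefly, retaking the lock, and retrying up to 100 times. A self-contained sketch of that loop shape; `errBusy` and the `cancel` callback are stand-ins for `devicemapper.ErrBusy` and `devicemapper.CancelDeferredRemove`, and the real code unwraps errors with `errors.Cause` from pkg/errors rather than the stdlib `errors.Is` used here:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errBusy = errors.New("device busy") // stand-in for devicemapper.ErrBusy

// cancelWithRetry keeps calling cancel while it reports EBUSY, releasing mu
// around each sleep so other DeviceSet operations can make progress.
func cancelWithRetry(mu *sync.Mutex, cancel func() error) error {
	var err error
	for i := 0; i < 100; i++ {
		err = cancel()
		if err == nil {
			return nil
		}
		if !errors.Is(err, errBusy) {
			return err
		}
		mu.Unlock()
		time.Sleep(100 * time.Millisecond)
		mu.Lock()
	}
	return err
}

func main() {
	var mu sync.Mutex
	mu.Lock()
	defer mu.Unlock()
	attempts := 0
	err := cancelWithRetry(&mu, func() error {
		attempts++
		if attempts < 3 {
			return errBusy // busy twice, then succeed
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```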
- -func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { - dmDevicePath, err := os.Readlink(info.DevName()) - if err != nil { - return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) - } - - dmDeviceName := path.Base(dmDevicePath) - filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" - maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) - if err != nil { - return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) - } - defer maxRetriesFile.Close() - - // Set max retries to 0 - _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) - if err != nil { - return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) - } - return nil -} - // MountDevice mounts the device if not already mounted. func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { info, err := devices.lookupDeviceWithLock(hash) @@ -2396,15 +2300,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256))) - } - - if fstype == "xfs" && devices.xfsNospaceRetries != "" { - if err := devices.xfsSetNospaceRetries(info); err != nil { - unix.Unmount(path, unix.MNT_DETACH) - devices.deactivateDevice(info) - return err - } + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) } return nil @@ -2412,8 +2308,8 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { // UnmountDevice unmounts the device and removes it from hash. func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) info, err := devices.lookupDeviceWithLock(hash) if err != nil { @@ -2427,12 +2323,16 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { defer devices.Unlock() logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil { + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { return err } logrus.Debug("devmapper: Unmount done") - return devices.deactivateDevice(info) + if err := devices.deactivateDevice(info); err != nil { + return err + } + + return nil } // HasDevice returns true if the device metadata exists. 
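Editor's aside: both `Shutdown` and `UnmountDevice` above unmount with `MNT_DETACH`, i.e. a lazy unmount. A minimal illustration, assuming Linux; the path is made up:

```go
package main

import (
	"log"
	"syscall"
)

// lazyUnmount detaches the mount point immediately: it disappears from the
// namespace right away, but the kernel keeps the filesystem alive until the
// last process holding files open on it goes away.
func lazyUnmount(mountpoint string) error {
	return syscall.Unmount(mountpoint, syscall.MNT_DETACH)
}

func main() {
	// Illustrative path; actually running this needs a real mount and root.
	if err := lazyUnmount("/var/lib/storage/devicemapper/mnt/example"); err != nil {
		log.Printf("lazy unmount: %v", err)
	}
}
```

That behavior is exactly why the devmapper code uses it during shutdown: a mount still busy inside a running container stops blocking the daemon, and the device is released when that container exits.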
@@ -2524,8 +2424,8 @@ func (devices *DeviceSet) MetadataDevicePath() string { } func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(unix.Statfs_t) - if err := unix.Statfs(loopFile, buf); err != nil { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) return 0, err } @@ -2634,25 +2534,22 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ minFreeSpacePercent: defaultMinFreeSpacePercent, } - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return nil, graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return nil, graphdriver.ErrNotSupported - } - - if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { - // enable deferred stuff by default - enableDeferredDeletion = true - enableDeferredRemoval = true + // Pick up initialization settings, if any were saved before + defaultsFile := path.Join(root, "defaults") + defaultsBytes, err := ioutil.ReadFile(defaultsFile) + defaults := []string{} + settings := map[string]string{} + if err == nil && len(defaultsBytes) > 0 { + defaults = strings.Split(string(defaultsBytes), "\n") } foundBlkDiscard := false - var lvmSetupConfig directLVMConfig - for _, option := range options { + nthOption := 0 + for _, option := range append(defaults, options...) { + nthOption = nthOption + 1 + if len(option) == 0 { + continue + } key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err @@ -2740,78 +2637,15 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ } devices.minFreeSpacePercent = uint32(minFreeSpacePercent) - case "dm.xfs_nospace_max_retries": - _, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return nil, err - } - devices.xfsNospaceRetries = val - case "dm.directlvm_device": - lvmSetupConfig.Device = val - case "dm.directlvm_device_force": - lvmSetupConfigForce, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.thinp_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) - } - if per >= 100 { - return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpPercent = per - case "dm.thinp_metapercent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) - } - if per >= 100 { - return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpMetaPercent = per - case "dm.thinp_autoextend_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendPercent = per - case "dm.thinp_autoextend_threshold": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) - } - if per > 100 { - return nil, 
errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendThreshold = per - case "dm.libdm_log_level": - level, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) - } - if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { - return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) - } - // Register a new logging callback with the specified level. - devicemapper.LogInit(devicemapper.DefaultLogger{ - Level: int(level), - }) default: - return nil, fmt.Errorf("devmapper: Unknown option %s", key) + if nthOption > len(defaults) { + return nil, fmt.Errorf("devmapper: Unknown option %s", key) + } + logrus.Errorf("devmapper: Unknown option %s, ignoring", key) } + settings[key] = val } - if err := validateLVMConfig(lvmSetupConfig); err != nil { - return nil, err - } - - devices.lvmSetupConfig = lvmSetupConfig - // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { devices.doBlkDiscard = false @@ -2821,5 +2655,15 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ return nil, err } + // Save these settings along with the other metadata + defaults = []string{} + for key, val := range settings { + defaults = append(defaults, key+"="+val) + } + defaultsBytes = []byte(strings.Join(defaults, "\n") + "\n") + if err := ioutils.AtomicWriteFile(defaultsFile, defaultsBytes, 0600); err != nil { + return nil, err + } + return devices, nil } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_test.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_test.go index 7a0043d7b7eb..d24d4d0b59a6 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_test.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_test.go @@ -4,8 +4,6 @@ package devmapper import ( "fmt" - "os" - "syscall" "testing" "time" @@ -14,56 +12,16 @@ import ( ) func init() { - // Reduce the size of the base fs and loopback for the tests + // Reduce the size the the base fs and loopback for the tests defaultDataLoopbackSize = 300 * 1024 * 1024 defaultMetaDataLoopbackSize = 200 * 1024 * 1024 defaultBaseFsSize = 300 * 1024 * 1024 defaultUdevSyncOverride = true - if err := initLoopbacks(); err != nil { + if err := graphtest.InitLoopbacks(); err != nil { panic(err) } } -// initLoopbacks ensures that the loopback devices are properly created within -// the system running the device mapper tests. -func initLoopbacks() error { - statT, err := getBaseLoopStats() - if err != nil { - return err - } - // create at least 8 loopback files, ya, that is a good number - for i := 0; i < 8; i++ { - loopPath := fmt.Sprintf("/dev/loop%d", i) - // only create new loopback files if they don't exist - if _, err := os.Stat(loopPath); err != nil { - if mkerr := syscall.Mknod(loopPath, - uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { - return mkerr - } - os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) - } - } - return nil -} - -// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the -// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the -// stat data -func getBaseLoopStats() (*syscall.Stat_t, error) { - loop0, err := os.Stat("/dev/loop0") - if err != nil { - if os.IsNotExist(err) { - return &syscall.Stat_t{ - Uid: 0, - Gid: 0, - Mode: 0660, - }, nil - } - return nil, err - } - return loop0.Sys().(*syscall.Stat_t), nil -} - // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown func TestDevmapperSetup(t *testing.T) { @@ -101,7 +59,7 @@ func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataS defer graphtest.PutDriver(t) // make sure data or metadata loopback size are the default size if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) { - t.Fatal("data or metadata loop back size is incorrect") + t.Fatalf("data or metadata loop back size is incorrect") } if err := driver.Cleanup(); err != nil { t.Fatal(err) @@ -116,7 +74,7 @@ func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataS } driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) { - t.Fatal("data or metadata loop back size is incorrect") + t.Fatalf("data or metadata loop back size is incorrect") } if err := driver.Cleanup(); err != nil { t.Fatal(err) @@ -146,7 +104,7 @@ func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) { // function return and we are deadlocked. Release lock // here so that cleanup could succeed and fail the test. driver.DeviceSet.Unlock() - t.Fatal("Could not acquire devices lock after call to cleanupDeletedDevices()") + t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()") case <-doneChan: } } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index d68fb66cc8d2..87a427a866b8 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -14,10 +14,8 @@ import ( "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/devicemapper" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/system" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) func init() { @@ -31,7 +29,6 @@ type Driver struct { uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter - locker *locker.Locker } // Init creates a driver with the given home and the set of options. 
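Editor's aside, stepping back to the `NewDeviceSet` rewrite earlier in this patch: it now replays options persisted in a per-root `defaults` file ahead of the caller's options, errors out on unknown options from the caller but only logs and ignores unknown keys replayed from the saved defaults, and writes the merged key=value set back atomically. A rough sketch of that save/replay cycle; the helper names are mine, and temp-file-plus-rename stands in for the `ioutils.AtomicWriteFile` the patch actually calls:

```go
package main

import (
	"os"
	"path/filepath"
	"strings"
)

// saveSettings persists key=value lines atomically: write a temp file in the
// same directory, then rename it over the final path.
func saveSettings(root string, settings map[string]string) error {
	lines := make([]string, 0, len(settings))
	for k, v := range settings {
		lines = append(lines, k+"="+v)
	}
	tmp, err := os.CreateTemp(root, ".defaults-")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op once the rename has succeeded
	if _, err := tmp.WriteString(strings.Join(lines, "\n") + "\n"); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filepath.Join(root, "defaults"))
}

// loadSettings reads the saved lines back, skipping blanks, the way the
// rewritten NewDeviceSet replays them before the caller's options.
func loadSettings(root string) []string {
	raw, err := os.ReadFile(filepath.Join(root, "defaults"))
	if err != nil {
		return nil
	}
	var out []string
	for _, line := range strings.Split(string(raw), "\n") {
		if line != "" {
			out = append(out, line)
		}
	}
	return out
}

func main() {
	root, _ := os.MkdirTemp("", "devmapper-defaults-")
	defer os.RemoveAll(root)
	_ = saveSettings(root, map[string]string{"dm.basesize": "10G"})
	_ = loadSettings(root) // ["dm.basesize=10G"]
}
```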
@@ -51,7 +48,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - locker: locker.New(), } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil @@ -69,18 +65,18 @@ func (d *Driver) Status() [][2]string { status := [][2]string{ {"Pool Name", s.PoolName}, - {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, - {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, {"Backing Filesystem", s.BaseDeviceFS}, {"Data file", s.DataFile}, {"Metadata file", s.MetadataFile}, - {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, - {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, - {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, - {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, - {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, - {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, - {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, @@ -126,17 +122,12 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) } // Create adds a device with a given id and the parent. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { return err } @@ -146,8 +137,6 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { // Remove removes a device with a given id, unmounts the filesystem. 
func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) if !d.DeviceSet.HasDevice(id) { // Consider removing a non-existing device a no-op // This is useful to be able to progress on container removal @@ -157,15 +146,19 @@ func (d *Driver) Remove(id string) error { // This assumes the device has been properly Get/Put:ed and thus is unmounted if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return fmt.Errorf("failed to remove device %s: %v", id, err) + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err } - return system.EnsureRemoveAll(path.Join(d.home, "mnt", id)) + + return nil } // Get mounts a device with given id into the root filesystem func (d *Driver) Get(id, mountLabel string) (string, error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) mp := path.Join(d.home, "mnt", id) rootFs := path.Join(mp, "rootfs") if count := d.ctr.Increment(mp); count > 1 { @@ -216,8 +209,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Put unmounts a device and removes it. func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) mp := path.Join(d.home, "mnt", id) if count := d.ctr.Decrement(mp); count > 0 { return nil @@ -236,5 +227,6 @@ func (d *Driver) Exists(id string) bool { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - return nil + var imageStores []string + return imageStores } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go index 1dc3262d273c..cca1fe1b385a 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -7,8 +7,7 @@ import ( "fmt" "os" "path/filepath" - - "golang.org/x/sys/unix" + "syscall" ) // FIXME: this is copy-pasted from the aufs driver. @@ -16,17 +15,19 @@ import ( // Mounted returns true if a mount point exists. func Mounted(mountpoint string) (bool, error) { - var mntpointSt unix.Stat_t - if err := unix.Stat(mountpoint, &mntpointSt); err != nil { + mntpoint, err := os.Stat(mountpoint) + if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } - var parentSt unix.Stat_t - if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { return false, err } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) return mntpointSt.Dev != parentSt.Dev, nil } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index 569964784122..c16fc33e11d2 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -2,7 +2,6 @@ package graphdriver import ( "fmt" - "io" "os" "path/filepath" "strings" @@ -29,19 +28,12 @@ var ( // ErrNotSupported returned when driver is not supported. ErrNotSupported = errors.New("driver not supported") - // ErrPrerequisites returned when driver does not meet prerequisites. + // ErrPrerequisites retuned when driver does not meet prerequisites. ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") // ErrIncompatibleFS returned when file system is not supported. 
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") ) -//CreateOpts contains optional arguments for Create() and CreateReadWrite() -// methods. -type CreateOpts struct { - MountLabel string - StorageOpt map[string]string -} - // InitFunc initializes the storage driver. type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) @@ -55,13 +47,11 @@ type ProtoDriver interface { // String returns a string representation of this driver. String() string // CreateReadWrite creates a new, empty filesystem layer that is ready - // to be used as the storage for a container. Additional options can - // be passed in opts. parent may be "" and opts may be nil. - CreateReadWrite(id, parent string, opts *CreateOpts) error + // to be used as the storage for a container. + CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error // Create creates a new, empty, filesystem layer with the - // specified id and parent and options passed in opts. Parent - // may be "" and opts may be nil. - Create(id, parent string, opts *CreateOpts) error + // specified id and parent and mountLabel. Parent and mountLabel may be "". + Create(id, parent, mountLabel string, storageOpt map[string]string) error // Remove attempts to remove the filesystem layer with this id. Remove(id string) error // Get returns the mountpoint for the layered filesystem referred @@ -88,48 +78,26 @@ type ProtoDriver interface { AdditionalImageStores() []string } -// DiffDriver is the interface to use to implement graph diffs -type DiffDriver interface { +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". - Diff(id, parent string) (io.ReadCloser, error) + Diff(id, parent string) (archive.Archive, error) // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. Changes(id, parent string) ([]archive.Change, error) // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. - // The io.Reader must be an uncompressed stream. - ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. DiffSize(id, parent string) (size int64, err error) } -// Driver is the interface for layered/snapshot file system drivers. -type Driver interface { - ProtoDriver - DiffDriver -} - -// Capabilities defines a list of capabilities a driver may implement. -// These capabilities are not required; however, they do determine how a -// graphdriver can be used. -type Capabilities struct { - // Flags that this driver is capable of reproducing exactly equivalent - // diffs for read-only layers. If set, clients can rely on the driver - // for consistent tar streams, and avoid extra processing to account - // for potential differences (eg: the layer store's use of tar-split). - ReproducesExactDiffs bool -} - -// CapabilityDriver is the interface for layered file system drivers that -// can report on their Capabilities. 
-type CapabilityDriver interface { - Capabilities() Capabilities -} - // DiffGetterDriver is the interface for layered file system drivers that // provide a specialized function for getting file contents for tar-split. type DiffGetterDriver interface { @@ -168,13 +136,15 @@ func Register(name string, initFunc InitFunc) error { } // GetDriver initializes and returns the registered driver -func GetDriver(name string, config Options) (Driver, error) { +func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) } - - logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root) - return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root) + if pluginDriver, err := lookupPlugin(name, home, options); err == nil { + return pluginDriver, nil + } + logrus.Errorf("Failed to GetDriver graph %s %s", name, home) + return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, home) } // getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins @@ -186,24 +156,15 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) } -// Options is used to initialize a graphdriver -type Options struct { - Root string - DriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ExperimentalEnabled bool -} - // New creates the driver and initializes it at the specified root. -func New(name string, config Options) (Driver, error) { +func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { if name != "" { logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver - return GetDriver(name, config) + return GetDriver(name, root, options, uidMaps, gidMaps) } // Guess for prior driver - driversMap := scanPriorDrivers(config.Root) + driversMap := scanPriorDrivers(root) for _, name := range priority { if name == "vfs" { // don't use vfs even if there is state present. @@ -212,13 +173,13 @@ func New(name string, config Options) (Driver, error) { if _, prior := driversMap[name]; prior { // of the state found from prior drivers, check in order of our priority // which we would prefer - driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) if err != nil { // unlike below, we will return error here, because there is prior // state, and now it is no longer supported/prereq/compatible, so // something changed and needs attention. Otherwise the daemon's // images would just "disappear". 
- logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) return nil, err } @@ -230,17 +191,17 @@ func New(name string, config Options) (Driver, error) { driversSlice = append(driversSlice, name) } - return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) + return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(driversSlice, ", ")) } - logrus.Infof("[graphdriver] using prior storage driver: %s", name) + logrus.Infof("[graphdriver] using prior storage driver %q", name) return driver, nil } } // Check for priority drivers first for _, name := range priority { - driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) if err != nil { if isDriverNotSupported(err) { continue @@ -252,7 +213,7 @@ func New(name string, config Options) (Driver, error) { // Check all registered drivers if no priority driver is found for name, initFunc := range drivers { - driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps) if err != nil { if isDriverNotSupported(err) { continue diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index 53394b738d13..2891a84f3aca 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -1,10 +1,6 @@ package graphdriver -import ( - "syscall" - - "golang.org/x/sys/unix" -) +import "syscall" var ( // Slice of drivers that should be used in an order @@ -15,7 +11,7 @@ var ( // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf unix.Statfs_t + var buf syscall.Statfs_t if err := syscall.Statfs(mountPath, &buf); err != nil { return false, err } diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index 94f7270eaeec..6e1f2ee3c33b 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -4,9 +4,9 @@ package graphdriver import ( "path/filepath" + "syscall" "github.com/containers/storage/pkg/mount" - "golang.org/x/sys/unix" ) const ( @@ -66,14 +66,13 @@ var ( FsMagicAufs: "aufs", FsMagicBtrfs: "btrfs", FsMagicCramfs: "cramfs", - FsMagicEcryptfs: "ecryptfs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", FsMagicGPFS: "gpfs", FsMagicJffs2Fs: "jffs2", FsMagicJfs: "jfs", FsMagicNfsFs: "nfs", - FsMagicOverlay: "overlayfs", + FsMagicOverlay: "overlay", FsMagicRAMFs: "ramfs", FsMagicReiserFs: "reiserfs", FsMagicSmbFs: "smb", @@ -88,14 +87,14 @@ var ( // GetFSMagic returns the filesystem id given the path. 
func GetFSMagic(rootpath string) (FsMagic, error) { - var buf unix.Statfs_t - if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { return 0, err } return FsMagic(buf.Type), nil } -// NewFsChecker returns a checker configured for the provided FsMagic +// NewFsChecker returns a checker configured for the provied FsMagic func NewFsChecker(t FsMagic) Checker { return &fsChecker{ t: t, @@ -127,8 +126,8 @@ func (c *defaultChecker) IsMounted(path string) bool { // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf unix.Statfs_t - if err := unix.Statfs(mountPath, &buf); err != nil { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { return false, err } return FsMagic(buf.Type) == fsType, nil diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go index 174fa9670bf1..cfbc26e84644 100644 --- a/vendor/github.com/containers/storage/drivers/driver_solaris.go +++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go @@ -19,8 +19,8 @@ import ( "path/filepath" "unsafe" - "github.com/containers/storage/pkg/mount" - "github.com/sirupsen/logrus" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" ) const ( @@ -45,52 +45,22 @@ func GetFSMagic(rootpath string) (FsMagic, error) { return 0, nil } -type fsChecker struct { - t FsMagic -} - -func (c *fsChecker) IsMounted(path string) bool { - m, _ := Mounted(c.t, path) - return m -} - -// NewFsChecker returns a checker configured for the provided FsMagic -func NewFsChecker(t FsMagic) Checker { - return &fsChecker{ - t: t, - } -} - -// NewDefaultChecker returns a check that parses /proc/mountinfo to check -// if the specified path is mounted. -// No-op on Solaris. -func NewDefaultChecker() Checker { - return &defaultChecker{} -} - -type defaultChecker struct { -} - -func (c *defaultChecker) IsMounted(path string) bool { - m, _ := mount.Mounted(path) - return m -} - // Mounted checks if the given path is mounted as the fs type //Solaris supports only ZFS for now func Mounted(fsType FsMagic, mountPath string) (bool, error) { cs := C.CString(filepath.Dir(mountPath)) - defer C.free(unsafe.Pointer(cs)) buf := C.getstatfs(cs) - defer C.free(unsafe.Pointer(buf)) // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || (buf.f_basetype[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) - return false, ErrPrerequisites + log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + C.free(unsafe.Pointer(buf)) + return false, errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", mountPath) } + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) return true, nil } diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index 48a1f078f084..693107295e92 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -1,14 +1,14 @@ package graphdriver import ( - "io" "time" + "github.com/sirupsen/logrus" + "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" - "github.com/sirupsen/logrus" ) var ( @@ -31,30 +31,30 @@ type NaiveDiffDriver struct { // NewNaiveDiffDriver returns a fully functional driver that wraps the // given ProtoDriver and adds the capability of the following methods which // it may or may not support on its own: -// Diff(id, parent string) (io.ReadCloser, error) +// Diff(id, parent string) (archive.Archive, error) // Changes(id, parent string) ([]archive.Change, error) -// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) // DiffSize(id, parent string) (size int64, err error) func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { - return &NaiveDiffDriver{ProtoDriver: driver, - uidMaps: uidMaps, - gidMaps: gidMaps} + gdw := &NaiveDiffDriver{ + ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + return gdw } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { - startTime := time.Now() - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, "") +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + layerFs, err := gdw.Get(id, "") if err != nil { return nil, err } defer func() { if err != nil { - driver.Put(id) + gdw.Put(id) } }() @@ -65,16 +65,16 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - driver.Put(id) + gdw.Put(id) return err }), nil } - parentFs, err := driver.Get(parent, "") + parentFs, err := gdw.Get(parent, "") if err != nil { return nil, err } - defer driver.Put(parent) + defer gdw.Put(parent) changes, err := archive.ChangesDirs(layerFs, parentFs) if err != nil { @@ -88,13 +88,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - driver.Put(id) - - // NaiveDiffDriver compares file metadata with parent layers. Parent layers - // are extracted from tar's with full second precision on modified time. - // We need this hack here to make sure calls within same second receive - // correct result. 
- time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + gdw.Put(id) return err }), nil } @@ -102,22 +96,20 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, "") + layerFs, err := gdw.Get(id, "") if err != nil { return nil, err } - defer driver.Put(id) + defer gdw.Put(id) parentFs := "" if parent != "" { - parentFs, err = driver.Get(parent, "") + parentFs, err = gdw.Get(parent, "") if err != nil { return nil, err } - defer driver.Put(parent) + defer gdw.Put(parent) } return archive.ChangesDirs(layerFs, parentFs) @@ -126,15 +118,13 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { - driver := gdw.ProtoDriver - +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { // Mount the root filesystem so we can apply the diff/layer. - layerFs, err := driver.Get(id, "") + layerFs, err := gdw.Get(id, "") if err != nil { return } - defer driver.Put(id) + defer gdw.Put(id) options := &archive.TarOptions{UIDMaps: gdw.uidMaps, GIDMaps: gdw.gidMaps} @@ -152,18 +142,16 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. 
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { - driver := gdw.ProtoDriver - changes, err := gdw.Changes(id, parent) if err != nil { return } - layerFs, err := driver.Get(id, "") + layerFs, err := gdw.Get(id, "") if err != nil { return } - defer driver.Put(id) + defer gdw.Put(id) return archive.ChangesSize(layerFs, changes), nil } diff --git a/vendor/github.com/containers/storage/drivers/graphtest/graphbench_unix.go b/vendor/github.com/containers/storage/drivers/graphtest/graphbench_unix.go index 1b58ab106bc6..5498eac30a0e 100644 --- a/vendor/github.com/containers/storage/drivers/graphtest/graphbench_unix.go +++ b/vendor/github.com/containers/storage/drivers/graphtest/graphbench_unix.go @@ -19,7 +19,7 @@ func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + if err := driver.Create(base, "", "", nil); err != nil { b.Fatal(err) } @@ -38,7 +38,7 @@ func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...strin base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + if err := driver.Create(base, "", "", nil); err != nil { b.Fatal(err) } @@ -62,7 +62,8 @@ func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...strin defer PutDriver(b) base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + + if err := driver.Create(base, "", "", nil); err != nil { b.Fatal(err) } @@ -91,7 +92,8 @@ func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driverop defer PutDriver(b) base := stringid.GenerateRandomID() upper := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + + if err := driver.Create(base, "", "", nil); err != nil { b.Fatal(err) } @@ -99,7 +101,7 @@ func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driverop b.Fatal(err) } - if err := driver.Create(upper, base, nil); err != nil { + if err := driver.Create(upper, base, "", nil); err != nil { b.Fatal(err) } @@ -126,7 +128,8 @@ func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, drive defer PutDriver(b) base := stringid.GenerateRandomID() upper := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + + if err := driver.Create(base, "", "", nil); err != nil { b.Fatal(err) } @@ -134,7 +137,7 @@ func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, drive b.Fatal(err) } - if err := driver.Create(upper, base, nil); err != nil { + if err := driver.Create(upper, base, "", nil); err != nil { b.Fatal(err) } @@ -149,7 +152,7 @@ func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, drive b.StopTimer() for i := 0; i < b.N; i++ { diff := stringid.GenerateRandomID() - if err := driver.Create(diff, base, nil); err != nil { + if err := driver.Create(diff, base, "", nil); err != nil { b.Fatal(err) } @@ -188,7 +191,8 @@ func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, d defer PutDriver(b) base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + + if err := driver.Create(base, "", "", nil); err != nil { b.Fatal(err) } @@ -221,7 +225,8 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d defer PutDriver(b) base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + + if err := driver.Create(base, "", "", nil); err != 
nil { b.Fatal(err) } diff --git a/vendor/github.com/containers/storage/drivers/graphtest/graphtest_unix.go b/vendor/github.com/containers/storage/drivers/graphtest/graphtest_unix.go index 229121ef48be..fbee026f9563 100644 --- a/vendor/github.com/containers/storage/drivers/graphtest/graphtest_unix.go +++ b/vendor/github.com/containers/storage/drivers/graphtest/graphtest_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd solaris +// +build linux freebsd package graphtest @@ -9,6 +9,7 @@ import ( "os" "path" "reflect" + "syscall" "testing" "unsafe" @@ -16,9 +17,6 @@ import ( "github.com/containers/storage/pkg/stringid" "github.com/docker/go-units" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" ) var ( @@ -36,10 +34,15 @@ type Driver struct { func newDriver(t testing.TB, name string, options []string) *Driver { root, err := ioutil.TempDir("", "storage-graphtest-") - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } - require.NoError(t, os.MkdirAll(root, 0755)) - d, err := graphdriver.GetDriver(name, graphdriver.Options{DriverOptions: options, Root: root}) + d, err := graphdriver.GetDriver(name, root, options, nil, nil) if err != nil { t.Logf("graphdriver: %v\n", err) cause := errors.Cause(err) @@ -85,11 +88,14 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str driver := GetDriver(t, drivername, driverOptions...) defer PutDriver(t) - err := driver.Create("empty", "", nil) - require.NoError(t, err) + if err := driver.Create("empty", "", "", nil); err != nil { + t.Fatal(err) + } defer func() { - require.NoError(t, driver.Remove("empty")) + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } }() if !driver.Exists("empty") { @@ -97,14 +103,21 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str } dir, err := driver.Get("empty", "") - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } verifyFile(t, dir, 0755|os.ModeDir, 0, 0) // Verify that the directory is empty fis, err := readDir(dir) - require.NoError(t, err) - assert.Len(t, fis, 0) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } driver.Put("empty") } @@ -116,7 +129,9 @@ func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...stri createBase(t, driver, "Base") defer func() { - require.NoError(t, driver.Remove("Base")) + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } }() verifyBase(t, driver, "Base") } @@ -127,14 +142,21 @@ func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...stri defer PutDriver(t) createBase(t, driver, "Base") + defer func() { - require.NoError(t, driver.Remove("Base")) + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } }() - err := driver.Create("Snap", "Base", nil) - require.NoError(t, err) + if err := driver.Create("Snap", "Base", "", nil); err != nil { + t.Fatal(err) + } + defer func() { - require.NoError(t, driver.Remove("Snap")) + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } }() verifyBase(t, driver, "Snap") @@ -146,7 +168,8 @@ func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, dr defer PutDriver(t) base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + + if err := driver.Create(base, "", "", nil); err != nil { t.Fatal(err) } @@ -178,9 +201,8 @@ func 
DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverO upper := stringid.GenerateRandomID() deleteFile := "file-remove.txt" deleteFileContent := []byte("This file should get removed in upper!") - deleteDir := "var/lib" - if err := driver.Create(base, "", nil); err != nil { + if err := driver.Create(base, "", "", nil); err != nil { t.Fatal(err) } @@ -192,11 +214,7 @@ func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverO t.Fatal(err) } - if err := addDirectory(driver, base, deleteDir); err != nil { - t.Fatal(err) - } - - if err := driver.Create(upper, base, nil); err != nil { + if err := driver.Create(upper, base, "", nil); err != nil { t.Fatal(err) } @@ -204,7 +222,7 @@ func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverO t.Fatal(err) } - if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil { + if err := removeFile(driver, upper, deleteFile); err != nil { t.Fatal(err) } @@ -214,7 +232,7 @@ func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverO } diff := stringid.GenerateRandomID() - if err := driver.Create(diff, base, nil); err != nil { + if err := driver.Create(diff, base, "", nil); err != nil { t.Fatal(err) } @@ -255,10 +273,6 @@ func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverO if err := checkFileRemoved(driver, diff, deleteFile); err != nil { t.Fatal(err) } - - if err := checkFileRemoved(driver, diff, deleteDir); err != nil { - t.Fatal(err) - } } // DriverTestChanges tests computed changes on a layer matches changes made @@ -267,7 +281,8 @@ func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) defer PutDriver(t) base := stringid.GenerateRandomID() upper := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { + + if err := driver.Create(base, "", "", nil); err != nil { t.Fatal(err) } @@ -275,7 +290,7 @@ func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) t.Fatal(err) } - if err := driver.Create(upper, base, nil); err != nil { + if err := driver.Create(upper, base, "", nil); err != nil { t.Fatal(err) } @@ -317,10 +332,9 @@ func DriverTestSetQuota(t *testing.T, drivername string) { defer PutDriver(t) createBase(t, driver, "Base") - createOpts := &graphdriver.CreateOpts{} - createOpts.StorageOpt = make(map[string]string, 1) - createOpts.StorageOpt["size"] = "50M" - if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + storageOpt := make(map[string]string, 1) + storageOpt["size"] = "50M" + if err := driver.Create("zfsTest", "Base", "", storageOpt); err != nil { t.Fatal(err) } @@ -331,8 +345,8 @@ func DriverTestSetQuota(t *testing.T, drivername string) { quota := uint64(50 * units.MiB) err = writeRandomFile(path.Join(mountPath, "file"), quota*2) - if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT { - t.Fatalf("expect write() to fail with %v, got %v", unix.EDQUOT, err) + if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT { + t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err) } } diff --git a/vendor/github.com/containers/storage/drivers/graphtest/testutil.go b/vendor/github.com/containers/storage/drivers/graphtest/testutil.go index b50fbc187c18..0a6182f1c98e 100644 --- a/vendor/github.com/containers/storage/drivers/graphtest/testutil.go +++ b/vendor/github.com/containers/storage/drivers/graphtest/testutil.go @@ -78,29 +78,14 @@ func addFile(drv graphdriver.Driver, layer, 
filename string, content []byte) err return ioutil.WriteFile(path.Join(root, filename), content, 0755) } -func addDirectory(drv graphdriver.Driver, layer, dir string) error { +func removeFile(drv graphdriver.Driver, layer, filename string) error { root, err := drv.Get(layer, "") if err != nil { return err } defer drv.Put(layer) - return os.MkdirAll(path.Join(root, dir), 0755) -} - -func removeAll(drv graphdriver.Driver, layer string, names ...string) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - for _, filename := range names { - if err := os.RemoveAll(path.Join(root, filename)); err != nil { - return err - } - } - return nil + return os.Remove(path.Join(root, filename)) } func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { @@ -275,7 +260,7 @@ func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, lastLayer := baseLayer for i := 1; i <= count; i++ { nextLayer := stringid.GenerateRandomID() - if err := drv.Create(nextLayer, lastLayer, nil); err != nil { + if err := drv.Create(nextLayer, lastLayer, "", nil); err != nil { return "", err } if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil { diff --git a/vendor/github.com/containers/storage/drivers/graphtest/testutil_unix.go b/vendor/github.com/containers/storage/drivers/graphtest/testutil_unix.go index 919485006a66..732a5b832d96 100644 --- a/vendor/github.com/containers/storage/drivers/graphtest/testutil_unix.go +++ b/vendor/github.com/containers/storage/drivers/graphtest/testutil_unix.go @@ -3,6 +3,7 @@ package graphtest import ( + "fmt" "io/ioutil" "os" "path" @@ -10,52 +11,118 @@ import ( "testing" "github.com/containers/storage/drivers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" ) +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func InitLoopbacks() error { + statT, err := getBaseLoopStats() + if err != nil { + return err + } + // create at least 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { fi, err := os.Stat(path) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } - actual := fi.Mode() - assert.Equal(t, mode&os.ModeType, actual&os.ModeType, path) - assert.Equal(t, mode&os.ModePerm, actual&os.ModePerm, path) - assert.Equal(t, mode&os.ModeSticky, actual&os.ModeSticky, path) - assert.Equal(t, mode&os.ModeSetuid, actual&os.ModeSetuid, path) - assert.Equal(t, mode&os.ModeSetgid, actual&os.ModeSetgid, path) + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - assert.Equal(t, uid, stat.Uid, path) - assert.Equal(t, gid, stat.Gid, path) + if stat.Uid != uid { + t.Fatalf("%s no owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } } } func createBase(t testing.TB, driver graphdriver.Driver, name string) { // We need to be able to set any perms - oldmask := unix.Umask(0) - defer unix.Umask(oldmask) + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) - err := driver.CreateReadWrite(name, "", nil) - require.NoError(t, err) + if err := driver.CreateReadWrite(name, "", "", nil); err != nil { + t.Fatal(err) + } dir, err := driver.Get(name, "") - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } defer driver.Put(name) subdir := path.Join(dir, "a subdir") - require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky)) - require.NoError(t, os.Chown(subdir, 1, 2)) + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + t.Fatal(err) + } file := path.Join(dir, "a file") - err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid) - require.NoError(t, err) + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } } func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { dir, err := driver.Get(name, "") - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } defer driver.Put(name) subdir := path.Join(dir, "a subdir") @@ -64,7 +131,13 @@ func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { file := path.Join(dir, "a file") verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) - files, err := readDir(dir) - require.NoError(t, err) - assert.Len(t, files, 2) + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + } 
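Editor's aside: `InitLoopbacks` above builds each loop device's `dev_t` by hand. On Linux the major number 7 (the loop driver) sits in bits 8-19, while the minor number is split between the low 8 bits and the bits above the major field. A sketch of just that encoding, assuming Linux and root (CAP_MKNOD):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// loopDevNum encodes dev_t for /dev/loopN: major 7 in bits 8-19, minor
// bits 0-7 kept low, minor bits 8-19 shifted up past the major field.
// This is the same expression InitLoopbacks uses.
func loopDevNum(minor int) int {
	return (7 << 8) | (minor & 0xff) | ((minor & 0xfff00) << 12)
}

func main() {
	for i := 0; i < 8; i++ {
		p := fmt.Sprintf("/dev/loop%d", i)
		if _, err := os.Stat(p); err == nil {
			continue // device node already exists
		}
		// 0660 block-special node, the fallback mode getBaseLoopStats assumes.
		if err := syscall.Mknod(p, syscall.S_IFBLK|0660, loopDevNum(i)); err != nil {
			fmt.Fprintf(os.Stderr, "mknod %s: %v\n", p, err)
		}
	}
}
```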
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go deleted file mode 100644 index 2a096edf6f1b..000000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build linux - -package overlay - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "syscall" - - "github.com/containers/storage/pkg/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// doesSupportNativeDiff checks whether the filesystem has a bug -// which copies up the opaque flag when copying up an opaque -// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR. -// When these exist naive diff should be used. -func doesSupportNativeDiff(d string) error { - td, err := ioutil.TempDir(d, "opaque-bug-check") - if err != nil { - return err - } - defer func() { - if err := os.RemoveAll(td); err != nil { - logrus.Warnf("Failed to remove check directory %v: %v", td, err) - } - }() - - // Make directories l1/d, l1/d1, l2/d, l3, work, merged - if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { - return err - } - if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { - return err - } - if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { - return err - } - - // Mark l2/d as opaque - if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { - return errors.Wrap(err, "failed to set opaque flag on middle layer") - } - - opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) - if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { - return errors.Wrap(err, "failed to mount overlay") - } - defer func() { - if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { - logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) - } - }() - - // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" - if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { - return errors.Wrap(err, "failed to write to merged directory") - } - - // Check l3/d does not have opaque flag - xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") - if err != nil { - return errors.Wrap(err, "failed to read opaque flag on upper layer") - } - if string(xattrOpaque) == "y" { - return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") - } - - // rename "d1" to "d2" - if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { - // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled - if err.(*os.LinkError).Err == syscall.EXDEV { - return nil - } - return errors.Wrap(err, "failed to rename dir in merged directory") - } - // get the xattr of "d2" - xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect") - if err != nil { - return errors.Wrap(err, "failed to read 
redirect flag on upper layer") - } - - if string(xattrRedirect) == "d1" { - return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") - } - - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index feb0395924c0..1b53f0c8aee3 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -9,9 +9,9 @@ import ( "fmt" "os" "runtime" + "syscall" "github.com/containers/storage/pkg/reexec" - "golang.org/x/sys/unix" ) func init() { @@ -31,12 +31,12 @@ type mountOptions struct { Flag uint32 } -func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { +func mountFrom(dir, device, target, mType, label string) error { options := &mountOptions{ Device: device, Target: target, Type: mType, - Flag: uint32(flags), + Flag: 0, Label: label, } @@ -49,19 +49,18 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e output := bytes.NewBuffer(nil) cmd.Stdout = output cmd.Stderr = output + if err := cmd.Start(); err != nil { - w.Close() return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { - w.Close() return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) } w.Close() if err := cmd.Wait(); err != nil { - return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) + return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) } return nil } @@ -81,7 +80,7 @@ func mountFromMain() { fatal(err) } - if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { fatal(err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index d224406e759f..f3cb27e2aef3 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -5,7 +5,6 @@ package overlay import ( "bufio" "fmt" - "io" "io/ioutil" "os" "os/exec" @@ -13,26 +12,21 @@ import ( "path/filepath" "strconv" "strings" - "sync" + "syscall" + + "github.com/sirupsen/logrus" "github.com/containers/storage/drivers" - "github.com/containers/storage/drivers/overlayutils" - "github.com/containers/storage/drivers/quota" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" - "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/parsers/kernel" - "github.com/containers/storage/pkg/system" - units "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) var ( @@ -48,7 +42,7 @@ var ( // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers -// below as well as "merged" and "work" directories. 
The "diff" directory +// below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost @@ -82,43 +76,26 @@ const ( idLength = 26 ) -type overlayOptions struct { - overrideKernelCheck bool - imageStores []string - quota quota.Quota -} - // Driver contains information about the home directory and the list of active mounts that are created using this driver. type Driver struct { - name string - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - quotaCtl *quota.Control - options overlayOptions - naiveDiff graphdriver.DiffDriver - supportsDType bool - locker *locker.Locker + name string + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + opts *overlayOptions } -var ( - backingFs = "" - projectQuotaSupported = false - - useNaiveDiffLock sync.Once - useNaiveDiffOnly bool -) +var backingFs = "" func init() { - graphdriver.Register("overlay", Init) - graphdriver.Register("overlay2", Init) + graphdriver.Register("overlay", InitAsOverlay) + graphdriver.Register("overlay2", InitAsOverlay2) } -// Init returns the a native diff driver for overlay filesystem. -// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. -// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { +// InitWithName returns the a naive diff driver for the overlay filesystem, +// which returns the passed-in name when asked which driver it is. 
+func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err @@ -137,7 +114,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap if !opts.overrideKernelCheck { return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") } - logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update") + logrus.Warnf("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update") } fsMagic, err := graphdriver.GetFSMagic(home) @@ -150,19 +127,9 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: logrus.Errorf("'overlay' is not supported over %s", backingFs) return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) - case graphdriver.FsMagicBtrfs: - // Support for OverlayFS on BTRFS was added in kernel 4.7 - // See https://btrfs.wiki.kernel.org/index.php/Changelog - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs) - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs) - } - logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update") - } } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) @@ -178,46 +145,37 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - supportsDType, err := fsutils.SupportsDType(home) - if err != nil { - return nil, err - } - if !supportsDType { - logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) - // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4 - // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs) - } - d := &Driver{ - name: "overlay", - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), - supportsDType: supportsDType, - locker: locker.New(), - options: *opts, - } - - d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) - - if backingFs == "xfs" { - // Try to enable project quota support over xfs. - if d.quotaCtl, err = quota.NewControl(home); err == nil { - projectQuotaSupported = true - } else if opts.quota.Size > 0 { - return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err) - } - } else if opts.quota.Size > 0 { - // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. - return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. 
Found %v", backingFs) + name: name, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + opts: opts, } - logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) - return d, nil } +// InitAsOverlay returns the a naive diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func InitAsOverlay(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + return InitWithName("overlay", home, options, uidMaps, gidMaps) +} + +// InitAsOverlay2 returns the a naive diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + return InitWithName("overlay2", home, options, uidMaps, gidMaps) +} + +type overlayOptions struct { + overrideKernelCheck bool + imageStores []string +} + func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { @@ -227,21 +185,12 @@ func parseOptions(options []string) (*overlayOptions, error) { } key = strings.ToLower(key) switch key { - case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check": - logrus.Debugf("overlay: override_kernelcheck=%s", val) + case "overlay.override_kernel_check", "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } - case ".size", "overlay.size", "overlay2.size": - logrus.Debugf("overlay: size=%s", val) - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - o.quota.Size = uint64(size) - case ".imagestore", "overlay.imagestore", "overlay2.imagestore": - logrus.Debugf("overlay: imagestore=%s", val) + case "overlay.imagestore": // Additional read only image stores to use for lower paths for _, store := range strings.Split(val, ",") { store = filepath.Clean(store) @@ -250,7 +199,7 @@ func parseOptions(options []string) (*overlayOptions, error) { } st, err := os.Stat(store) if err != nil { - return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) + return nil, fmt.Errorf("overlay: Can't stat imageStore dir %s: %v", store, err) } if !st.IsDir() { return nil, fmt.Errorf("overlay: image path %q must be a directory", store) @@ -285,16 +234,6 @@ func supportsOverlay() error { return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") } -func useNaiveDiff(home string) bool { - useNaiveDiffLock.Do(func() { - if err := doesSupportNativeDiff(home); err != nil { - logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) - useNaiveDiffOnly = true - } - }) - return useNaiveDiffOnly -} - func (d *Driver) String() string { return d.name } @@ -304,8 +243,6 @@ func (d *Driver) String() string { func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, - {"Supports d_type", strconv.FormatBool(d.supportsDType)}, - {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } @@ -343,39 +280,18 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { - return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") - } - - if opts == nil { - opts = &graphdriver.CreateOpts{ - StorageOpt: map[string]string{}, - } - } - - if _, ok := opts.StorageOpt["size"]; !ok { - if opts.StorageOpt == nil { - opts.StorageOpt = map[string]string{} - } - opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) - } - - return d.create(id, parent, opts) +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. 
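The Create implementation that follows materializes the per-layer tree described in the comment at the top of this file: a "diff" directory for the layer's own content plus "merged" and "work" directories used at mount time, with "link" and "lower" bookkeeping files alongside. A rough sketch of that layout, using illustrative paths and omitting the id-mapped chown the real driver performs:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// layoutLayer sketches the skeleton the overlay driver's Create builds
// under home/<id>. The real driver additionally chowns the directories
// to the remapped root UID/GID and records a short "link" name.
func layoutLayer(home, id, lowerChain string) error {
	dir := filepath.Join(home, id)
	for _, sub := range []string{"diff", "merged", "work"} {
		if err := os.MkdirAll(filepath.Join(dir, sub), 0755); err != nil {
			return err
		}
	}
	if lowerChain == "" {
		return nil // base layers have no "lower" file
	}
	return ioutil.WriteFile(filepath.Join(dir, "lower"), []byte(lowerChain), 0644)
}

func main() {
	fmt.Println(layoutLayer("/tmp/overlay-home", "layer123", "l/BBB:l/CCC"))
}
```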
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - if opts != nil && len(opts.StorageOpt) != 0 { - if _, ok := opts.StorageOpt["size"]; ok { - return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") - } +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) { + + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") } - return d.create(id, parent, opts) -} -func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) @@ -396,20 +312,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr } }() - if opts != nil && len(opts.StorageOpt) > 0 { - driver := &Driver{} - if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { - return err - } - - if driver.options.quota.Size > 0 { - // Set container disk quota limit - if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { - return err - } - } - } - if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { return err } @@ -449,26 +351,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr return nil } -// Parse overlay storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { - // Read size to set the disk project quota per container - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return err - } - driver.options.quota.Size = uint64(size) - default: - return fmt.Errorf("Unknown option %s", key) - } - } - - return nil -} - func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) @@ -495,11 +377,11 @@ func (d *Driver) getLower(parent string) (string, error) { return strings.Join(lowers, ":"), nil } -func (d *Driver) dir(id string) string { - newpath := path.Join(d.home, id) +func (d *Driver) dir(val string) string { + newpath := path.Join(d.home, val) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { - l := path.Join(p, d.name, id) + l := path.Join(p, d.name, val) _, err = os.Stat(l) if err == nil { return l @@ -529,8 +411,6 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { @@ -539,16 +419,14 @@ func (d *Driver) Remove(id string) error { } } - if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
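One detail worth spelling out before Get: the getLower helper above derives the ":"-separated chain stored in each layer's "lower" file, ordered uppermost to lowermost, by putting the parent's own link first and then inheriting the parent's existing chain. A standalone sketch of that derivation (the `childLowers` name is ours):

```go
package main

import (
	"fmt"
	"strings"
)

// childLowers derives a new layer's lower chain from its parent: the
// parent's own link becomes the uppermost entry, followed by whatever
// the parent already listed in its "lower" file.
func childLowers(parentLink, parentLowers string) string {
	lowers := []string{parentLink}
	if parentLowers != "" {
		lowers = append(lowers, strings.Split(parentLowers, ":")...)
	}
	return strings.Join(lowers, ":")
}

func main() {
	lower2 := childLowers("l/AAA", "")     // layer2 sits on the base: l/AAA
	lower3 := childLowers("l/BBB", lower2) // layer3: l/BBB:l/AAA
	fmt.Println(lower2)
	fmt.Println(lower3)
}
```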
-func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return "", err } @@ -580,7 +458,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) } } else { - lower = newpath + lower = l } if newlowers == "" { newlowers = lower @@ -594,42 +472,22 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { return mergedDir, nil } defer func() { - if retErr != nil { + if err != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { - if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { - logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr) - } + syscall.Unmount(mergedDir, 0) } } }() workDir := path.Join(dir, "work") - opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, diffDir, workDir) - mountData := label.FormatMountLabel(opts, mountLabel) - mount := unix.Mount - mountTarget := mergedDir - - pageSize := unix.Getpagesize() - - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. - if len(mountData) > pageSize { - //FIXME: We need to figure out to get this to work with additional stores - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) - mountData = label.FormatMountLabel(opts, mountLabel) - if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) - } - - mount = func(source string, target string, mType string, flags uintptr, label string) error { - return mountFrom(d.home, source, target, mType, flags, label) - } - mountTarget = path.Join(id, "merged") + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, path.Join(id, "diff"), path.Join(id, "work")) + mountLabel = label.FormatMountLabel(opts, mountLabel) + if len(mountLabel) > syscall.Getpagesize() { + return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel)) } - if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { - return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err) + + if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a @@ -648,27 +506,19 @@ // Put unmounts the mount path created for the given id.
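The page-size check in Get above exists because the kernel rejects mount(2) data larger than one page; the deleted upstream code instead fell back to shorter relative paths and a chrooted mountFrom. A minimal sketch of the guard, with the SELinux label.FormatMountLabel step omitted:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// buildMountData mimics the check in Get: format the lowerdir/upperdir/
// workdir options and fail early if they cannot fit in a single page.
func buildMountData(lowers []string, upper, work string) (string, error) {
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		strings.Join(lowers, ":"), upper, work)
	if len(opts) > os.Getpagesize() {
		return "", fmt.Errorf("cannot mount layer, mount data too large (%d bytes)", len(opts))
	}
	return opts, nil
}

func main() {
	opts, err := buildMountData([]string{"l/AAA", "l/BBB"}, "layer3/diff", "layer3/work")
	fmt.Println(opts, err)
}
```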
func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return err - } mountpoint := path.Join(d.dir(id), "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil { - // If no lower, we used the diff directory, so no work to do - if os.IsNotExist(err) { + err := syscall.Unmount(mountpoint, 0) + if err != nil { + if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil { + // We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway return nil } - return err - } - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { - logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) } - return nil + return err } // Exists checks to see if the id is already mounted. @@ -677,33 +527,8 @@ func (d *Driver) Exists(id string) bool { return err == nil } -// isParent returns if the passed in parent is the direct parent of the passed in layer -func (d *Driver) isParent(id, parent string) bool { - lowers, err := d.getLowerDirs(id) - if err != nil { - return false - } - if parent == "" && len(lowers) > 0 { - return false - } - - parentDir := d.dir(parent) - var ld string - if len(lowers) > 0 { - ld = filepath.Dir(lowers[0]) - } - if ld == "" && parent == "" { - return true - } - return ld == parentDir -} - // ApplyDiff applies the new layer into a root -func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { - if !d.isParent(id, parent) { - return d.naiveDiff.ApplyDiff(id, parent, diff) - } - +func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { applyDir := d.getDiffPath(id) logrus.Debugf("Applying tar in %s", applyDir) @@ -716,7 +541,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64 return 0, err } - return directory.Size(applyDir) + return d.DiffSize(id, parent) } func (d *Driver) getDiffPath(id string) string { @@ -729,19 +554,12 @@ func (d *Driver) getDiffPath(id string) string { // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.DiffSize(id, parent) - } return directory.Size(d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". -func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.Diff(id, parent) - } - +func (d *Driver) Diff(id, parent string) (archive.Archive, error) { diffPath := d.getDiffPath(id) logrus.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ @@ -755,9 +573,6 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.Changes(id, parent) - } // Overlay doesn't have snapshots, so we need to get changes from all parent // layers. 
diffPath := d.getDiffPath(id) @@ -771,5 +586,5 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - return d.options.imageStores + return d.opts.imageStores } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_test.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_test.go index 74299a228c8a..39fd09fde96d 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_test.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_test.go @@ -3,15 +3,14 @@ package overlay import ( - "io/ioutil" "os" + "syscall" "testing" "github.com/containers/storage/drivers" "github.com/containers/storage/drivers/graphtest" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" - "golang.org/x/sys/unix" ) const driverName = "overlay" @@ -33,19 +32,7 @@ func cdMountFrom(dir, device, target, mType, label string) error { os.Chdir(dir) defer os.Chdir(wd) - return unix.Mount(device, target, mType, 0, label) -} - -func skipIfNaive(t *testing.T) { - td, err := ioutil.TempDir("", "naive-check-") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(td) - - if useNaiveDiff(td) { - t.Skipf("Cannot run test with naive diff") - } + return syscall.Mount(device, target, mType, 0, label) } // This avoids creating a new driver for each test if all tests are run @@ -71,12 +58,10 @@ func TestOverlay128LayerRead(t *testing.T) { } func TestOverlayDiffApply10Files(t *testing.T) { - skipIfNaive(t) graphtest.DriverTestDiffApply(t, 10, driverName) } func TestOverlayChanges(t *testing.T) { - skipIfNaive(t) graphtest.DriverTestChanges(t, driverName) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go index fc565ef0ba60..975b3a50fb30 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -12,7 +12,6 @@ import ( "time" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) // generateID creates a new random string identifier with the given length @@ -70,7 +69,7 @@ func retryOnError(err error) bool { case *os.PathError: return retryOnError(err.Err) // unpack the target error case syscall.Errno: - if err == unix.EPERM { + if err == syscall.EPERM { // EPERM represents an entropy pool exhaustion, a condition under // which we backoff and retry. return true diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go deleted file mode 100644 index 467733647c36..000000000000 --- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux - -package overlayutils - -import ( - "errors" - "fmt" -) - -// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. -func ErrDTypeNotSupported(driver, backingFs string) error { - msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) - if backingFs == "xfs" { - msg += " Reformat the filesystem with ftype=1 to enable d_type support." - } - msg += " Running without d_type is not supported." 
- return errors.New(msg) -} diff --git a/vendor/github.com/containers/storage/drivers/plugin.go b/vendor/github.com/containers/storage/drivers/plugin.go new file mode 100644 index 000000000000..a76aae6e01b0 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/plugin.go @@ -0,0 +1,32 @@ +// +build experimental + +package graphdriver + +import ( + "fmt" + "io" + + "github.com/containers/storage/pkg/plugins" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. + Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + pl, err := plugins.Get(name, "GraphDriver") + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, home, opts, pl.Client()) +} + +func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { + proxy := &graphDriverProxy{name, c} + return proxy, proxy.Init(home, opts) +} diff --git a/vendor/github.com/containers/storage/drivers/plugin_unsupported.go b/vendor/github.com/containers/storage/drivers/plugin_unsupported.go new file mode 100644 index 000000000000..daa7a170e415 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/plugin_unsupported.go @@ -0,0 +1,7 @@ +// +build !experimental + +package graphdriver + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + return nil, ErrNotSupported +} diff --git a/vendor/github.com/containers/storage/drivers/proxy.go b/vendor/github.com/containers/storage/drivers/proxy.go new file mode 100644 index 000000000000..d56b8731f189 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/proxy.go @@ -0,0 +1,225 @@ +// +build experimental + +package graphdriver + +import ( + "fmt" + + "github.com/containers/storage/pkg/archive" + "github.com/pkg/errors" +) + +type graphDriverProxy struct { + name string + client pluginClient +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string +} + +func (d *graphDriverProxy) Init(home string, opts []string) error { + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { + return err + } + if ret.Err 
!= "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return ret.Dir, err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) Metadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Metadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.client.Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return archive.Archive(body), nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ 
+ ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go deleted file mode 100644 index 93e74437132f..000000000000 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build linux - -// -// projectquota.go - implements XFS project quota controls -// for setting quota limits on a newly created directory. -// It currently supports the legacy XFS specific ioctls. -// -// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR -// for both xfs/ext4 for kernel version >= v4.5 -// - -package quota - -/* -#include -#include -#include -#include -#include - -#ifndef FS_XFLAG_PROJINHERIT -struct fsxattr { - __u32 fsx_xflags; - __u32 fsx_extsize; - __u32 fsx_nextents; - __u32 fsx_projid; - unsigned char fsx_pad[12]; -}; -#define FS_XFLAG_PROJINHERIT 0x00000200 -#endif -#ifndef FS_IOC_FSGETXATTR -#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) -#endif -#ifndef FS_IOC_FSSETXATTR -#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) -#endif - -#ifndef PRJQUOTA -#define PRJQUOTA 2 -#endif -#ifndef XFS_PROJ_QUOTA -#define XFS_PROJ_QUOTA 2 -#endif -#ifndef Q_XSETPQLIM -#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) -#endif -#ifndef Q_XGETPQUOTA -#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) -#endif -*/ -import "C" -import ( - "fmt" - "io/ioutil" - "path" - "path/filepath" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Quota limit params - currently we only control blocks hard limit -type Quota struct { - Size uint64 -} - -// Control - Context to be used by storage driver (e.g. overlay) -// who wants to apply project quotas to container dirs -type Control struct { - backingFsBlockDev string - nextProjectID uint32 - quotas map[string]uint32 -} - -// NewControl - initialize project quota support. -// Test to make sure that quota can be set on a test dir and find -// the first project id to be used for the next container create. -// -// Returns nil (and error) if project quota is not supported. -// -// First get the project id of the home directory. -// This test will fail if the backing fs is not xfs. -// -// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: -// echo 999:/var/lib/containers/storage/overlay >> /etc/projects -// echo storage:999 >> /etc/projid -// xfs_quota -x -c 'project -s storage' / -// -// In that case, the home directory project id will be used as a "start offset" -// and all containers will be assigned larger project ids (e.g. >= 1000). -// This is a way to prevent xfs_quota management from conflicting with containers/storage. -// -// Then try to create a test directory with the next project id and set a quota -// on it. If that works, continue to scan existing containers to map allocated -// project ids. 
-// -func NewControl(basePath string) (*Control, error) { - // - // Get project id of parent dir as minimal id to be used by driver - // - minProjectID, err := getProjectID(basePath) - if err != nil { - return nil, err - } - minProjectID++ - - // - // create backing filesystem device node - // - backingFsBlockDev, err := makeBackingFsDev(basePath) - if err != nil { - return nil, err - } - - // - // Test if filesystem supports project quotas by trying to set - // a quota on the first available project id - // - quota := Quota{ - Size: 0, - } - if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { - return nil, err - } - - q := Control{ - backingFsBlockDev: backingFsBlockDev, - nextProjectID: minProjectID + 1, - quotas: make(map[string]uint32), - } - - // - // get first project id to be used for next container - // - err = q.findNextProjectID(basePath) - if err != nil { - return nil, err - } - - logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) - return &q, nil -} - -// SetQuota - assign a unique project id to directory and set the quota limits -// for that project id -func (q *Control) SetQuota(targetPath string, quota Quota) error { - - projectID, ok := q.quotas[targetPath] - if !ok { - projectID = q.nextProjectID - - // - // assign project id to new container directory - // - err := setProjectID(targetPath, projectID) - if err != nil { - return err - } - - q.quotas[targetPath] = projectID - q.nextProjectID++ - } - - // - // set the quota limit for the container's project id - // - logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) - return setProjectQuota(q.backingFsBlockDev, projectID, quota) -} - -// setProjectQuota - set the quota for project id on xfs block device -func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { - var d C.fs_disk_quota_t - d.d_version = C.FS_DQUOT_VERSION - d.d_id = C.__u32(projectID) - d.d_flags = C.XFS_PROJ_QUOTA - - d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT - d.d_blk_hardlimit = C.__u64(quota.Size / 512) - d.d_blk_softlimit = d.d_blk_hardlimit - - var cs = C.CString(backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", - projectID, backingFsBlockDev, errno.Error()) - } - - return nil -} - -// GetQuota - get the quota limits of a directory that was configured with SetQuota -func (q *Control) GetQuota(targetPath string, quota *Quota) error { - - projectID, ok := q.quotas[targetPath] - if !ok { - return fmt.Errorf("quota not found for path : %s", targetPath) - } - - // - // get the quota limit for the container's project id - // - var d C.fs_disk_quota_t - - var cs = C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, - uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", - projectID, q.backingFsBlockDev, errno.Error()) - } - quota.Size = uint64(d.d_blk_hardlimit) * 512 - - return nil -} - -// getProjectID - get the project id of path on xfs -func getProjectID(targetPath string) (uint32, error) { - dir, err := openDir(targetPath) - if err != nil { - return 0, err - } - defer closeDir(dir) - - 
var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) - } - - return uint32(fsx.fsx_projid), nil -} - -// setProjectID - set the project id of path on xfs -func setProjectID(targetPath string, projectID uint32) error { - dir, err := openDir(targetPath) - if err != nil { - return err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) - } - fsx.fsx_projid = C.__u32(projectID) - fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT - _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) - } - - return nil -} - -// findNextProjectID - find the next project id to be used for containers -// by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID(home string) error { - files, err := ioutil.ReadDir(home) - if err != nil { - return fmt.Errorf("read directory failed : %s", home) - } - for _, file := range files { - if !file.IsDir() { - continue - } - path := filepath.Join(home, file.Name()) - projid, err := getProjectID(path) - if err != nil { - return err - } - if projid > 0 { - q.quotas[path] = projid - } - if q.nextProjectID <= projid { - q.nextProjectID = projid + 1 - } - } - - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -// Get the backing block device of the driver home directory -// and create a block device node under the home directory -// to be used by quotactl commands -func makeBackingFsDev(home string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(home, &stat); err != nil { - return "", err - } - - backingFsBlockDev := path.Join(home, "backingFsBlockDev") - // Re-create just in case someone copied the home directory over to a new device - unix.Unlink(backingFsBlockDev) - if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) - } - - return backingFsBlockDev, nil -} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index ae62207d1762..ff7a88f1a218 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -4,18 +4,17 @@ import ( "fmt" "os" "path/filepath" - "strings" "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" ) var ( // CopyWithTar defines the copy method to use. 
- CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar + CopyWithTar = chrootarchive.CopyWithTar ) func init() { @@ -26,22 +25,16 @@ func init() { // This sets the home directory for the driver and returns NaiveDiffDriver. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { d := &Driver{ - homes: []string{home}, - idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, } - rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { return nil, err } - for _, option := range options { - if strings.HasPrefix(option, "vfs.imagestore=") { - d.homes = append(d.homes, strings.Split(option[15:], ",")...) - continue - } - if strings.HasPrefix(option, ".imagestore=") { - d.homes = append(d.homes, strings.Split(option[12:], ",")...) - continue - } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } @@ -51,8 +44,9 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { - homes []string - idMappings *idtools.IDMappings + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap } func (d *Driver) String() string { @@ -76,26 +70,29 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) } // Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. 
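The vfs Create below has no copy-on-write at all: a child layer starts as a full copy of its parent via CopyWithTar, which is what makes vfs portable but space-hungry. A simplified stand-in for that copy step (plain file copy only; the real tar pipe also preserves ownership, symlinks, and special files):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// copyTree naively copies a directory tree, the way the vfs driver
// conceptually seeds a child layer from its parent.
func copyTree(src, dst string) error {
	return filepath.Walk(src, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(src, p)
		if err != nil {
			return err
		}
		target := filepath.Join(dst, rel)
		if info.IsDir() {
			return os.MkdirAll(target, info.Mode())
		}
		in, err := os.Open(p)
		if err != nil {
			return err
		}
		defer in.Close()
		out, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, in)
		return err
	})
}

func main() {
	fmt.Println(copyTree("/var/lib/containers/parent", "/var/lib/containers/child"))
}
```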
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 { +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + if len(storageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for vfs") } dir := d.dir(id) - rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { return err } - if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { return err } - labelOpts := []string{"level:s0"} - if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { label.SetFileLabel(dir, mountLabel) } if parent == "" { @@ -105,26 +102,22 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err != nil { return fmt.Errorf("%s: %s", parent, err) } - return CopyWithTar(parentDir, dir) + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil } func (d *Driver) dir(id string) string { - for i, home := range d.homes { - if i > 0 { - home = filepath.Join(home, d.String()) - } - candidate := filepath.Join(home, "dir", filepath.Base(id)) - fi, err := os.Stat(candidate) - if err == nil && fi.IsDir() { - return candidate - } - } - return filepath.Join(d.homes[0], "dir", filepath.Base(id)) + return filepath.Join(d.home, "dir", filepath.Base(id)) } // Remove deletes the content from the directory for a given id. func (d *Driver) Remove(id string) error { - return system.EnsureRemoveAll(d.dir(id)) + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil } // Get returns the directory for the given id. @@ -153,8 +146,6 @@ func (d *Driver) Exists(id string) bool { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - if len(d.homes) > 1 { - return d.homes[1:] - } - return nil + var imageStores []string + return imageStores } diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index abe2ac432107..7ab36513c104 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -6,7 +6,6 @@ import ( "bufio" "bytes" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -17,7 +16,6 @@ import ( "strings" "sync" "syscall" - "time" "unsafe" "github.com/Microsoft/go-winio" @@ -31,37 +29,17 @@ import ( "github.com/containers/storage/pkg/longpath" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/system" - units "github.com/docker/go-units" "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" + "github.com/vbatts/tar-split/tar/storage" ) // filterDriver is an HCSShim driver type for the Windows Filter driver. const filterDriver = 1 -var ( - // mutatedFiles is a list of files that are mutated by the import process - // and must be backed up and restored. 
- mutatedFiles = map[string]string{ - "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", - } - noreexec = false -) - // init registers the windows graph drivers to the register. func init() { graphdriver.Register("windowsfilter", InitFilter) - // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes - // debugging issues in the re-exec codepath significantly easier. - if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { - logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") - noreexec = true - } else { - reexec.Register("docker-windows-write-layer", writeLayerReexec) - } + reexec.Register("storage-windows-write-layer", writeLayer) } type checker struct { @@ -82,22 +60,13 @@ type Driver struct { cache map[string]string } +func isTP5OrOlder() bool { + return system.GetOSVersion().Build <= 14300 +} + // InitFilter returns a new Windows storage filter driver. func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) - - fsType, err := getFileSystemType(string(home[0])) - if err != nil { - return nil, err - } - if strings.ToLower(fsType) == "refs" { - return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) - } - - if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { - return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) - } - d := &Driver{ info: hcsshim.DriverInfo{ HomeDir: home, @@ -109,37 +78,6 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) return d, nil } -// win32FromHresult is a helper function to get the win32 error code from an HRESULT -func win32FromHresult(hr uintptr) uintptr { - if hr&0x1fff0000 == 0x00070000 { - return hr & 0xffff - } - return hr -} - -// getFileSystemType obtains the type of a file system through GetVolumeInformation -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx -func getFileSystemType(drive string) (fsType string, hr error) { - var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") - buf = make([]uint16, 255) - size = windows.MAX_PATH + 1 - ) - if len(drive) != 1 { - hr = errors.New("getFileSystemType must be called with a drive letter") - return - } - drive += `:\` - n := uintptr(unsafe.Pointer(nil)) - r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - fsType = windows.UTF16ToString(buf) - return -} - // String returns the string representation of a driver. This should match // the name the graph driver has been registered with. func (d *Driver) String() string { @@ -153,19 +91,8 @@ func (d *Driver) Status() [][2]string { } } -// panicIfUsedByLcow does exactly what it says. -// TODO @jhowardmsft - this is a temporary measure for the bring-up of -// Linux containers on Windows. It is a failsafe to ensure that the right -// graphdriver is used. 
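Stepping back to the isTP5OrOlder helper added earlier in this file: it gates the create-path workaround in which, on Windows Server 2016 TP5 (build 14300) and older, the layer directory must be pre-created with an ACL granting the Hyper-V Virtual Machines group access. A trivial sketch of that gate, with the build number stubbed rather than read via system.GetOSVersion:

```go
package main

import "fmt"

// osBuild stands in for system.GetOSVersion().Build; on a real Windows
// host it comes from the version information APIs.
var osBuild uint16 = 14393 // Windows Server 2016 RTM

// needsTP5ACLWorkaround mirrors isTP5OrOlder: TP5 shipped as build
// 14300, and anything at or below it needs the pre-created, ACL'd
// layer directory so Hyper-V containers can access the VM data.
func needsTP5ACLWorkaround() bool {
	return osBuild <= 14300
}

func main() {
	fmt.Println("apply TP5 ACL workaround:", needsTP5ACLWorkaround())
}
```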
-func panicIfUsedByLcow() { - if system.LCOWSupported() { - panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") - } -} - // Exists returns true if the given id is registered with this driver. func (d *Driver) Exists(id string) bool { - panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return false @@ -179,24 +106,20 @@ func (d *Driver) Exists(id string) bool { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - panicIfUsedByLcow() - if opts != nil { - return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) - } - return d.create(id, parent, "", false, nil) +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.create(id, parent, mountLabel, false, storageOpt) } // Create creates a new read-only layer with the given id. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - panicIfUsedByLcow() - if opts != nil { - return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) - } - return d.create(id, parent, "", true, nil) +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.create(id, parent, mountLabel, true, storageOpt) } func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for windows") + } + rPId, err := d.resolveID(parent) if err != nil { return err @@ -210,7 +133,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt var layerChain []string if rPId != "" { - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + parentPath, err := hcsshim.LayerMountPath(d.info, rPId) if err != nil { return err } @@ -233,20 +156,32 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt parentPath = layerChain[0] } - if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { - return err - } - - storageOptions, err := parseStorageOpt(storageOpt) - if err != nil { - return fmt.Errorf("Failed to parse storage options - %s", err) - } - - if storageOptions.size != 0 { - if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + if isTP5OrOlder() { + // Pre-create the layer directory, providing an ACL to give the Hyper-V Virtual Machines + // group access. This is necessary to ensure that Hyper-V containers can access the + // virtual machine data. This is not necessary post-TP5. + path, err := syscall.UTF16FromString(filepath.Join(d.info.HomeDir, id)) + if err != nil { + return err + } + // Give system and administrators full control, and VMs read, write, and execute. + // Mark these ACEs as inherited. 
+ sd, err := winio.SddlToSecurityDescriptor("D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)(A;OICI;FRFWFX;;;S-1-5-83-0)") + if err != nil { + return err + } + err = syscall.CreateDirectory(&path[0], &syscall.SecurityAttributes{ + Length: uint32(unsafe.Sizeof(syscall.SecurityAttributes{})), + SecurityDescriptor: uintptr(unsafe.Pointer(&sd[0])), + }) + if err != nil { return err } } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } } if _, err := os.Lstat(d.dir(parent)); err != nil { @@ -273,89 +208,16 @@ func (d *Driver) dir(id string) string { // Remove unmounts and removes the dir information. func (d *Driver) Remove(id string) error { - panicIfUsedByLcow() rID, err := d.resolveID(id) if err != nil { return err } - - // This retry loop is due to a bug in Windows (Internal bug #9432268) - // if GetContainers fails with ErrVmcomputeOperationInvalidState - // it is a transient error. Retry until it succeeds. - var computeSystems []hcsshim.ContainerProperties - retryCount := 0 - osv := system.GetOSVersion() - for { - // Get and terminate any template VMs that are currently using the layer. - // Note: It is unfortunate that we end up in the graphdrivers Remove() call - // for both containers and images, but the logic for template VMs is only - // needed for images - specifically we are looking to see if a base layer - // is in use by a template VM as a result of having started a Hyper-V - // container at some point. - // - // We have a retry loop for ErrVmcomputeOperationInvalidState and - // ErrVmcomputeOperationAccessIsDenied as there is a race condition - // in RS1 and RS2 building during enumeration when a silo is going away - // for example under it, in HCS. AccessIsDenied added to fix 30278. - // - // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider - // using platform APIs (if available) to get this more succinctly. Also - // consider enhancing the Remove() interface to have context of why - // the remove is being called - that could improve efficiency by not - // enumerating compute systems during a remove of a container as it's - // not required. - computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) - if err != nil { - if (osv.Build < 15139) && - ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { - if retryCount >= 500 { - break - } - retryCount++ - time.Sleep(10 * time.Millisecond) - continue - } - return err - } - break - } - - for _, computeSystem := range computeSystems { - if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { - container, err := hcsshim.OpenContainer(computeSystem.ID) - if err != nil { - return err - } - defer container.Close() - err = container.Terminate() - if hcsshim.IsPending(err) { - err = container.Wait() - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - return err - } - } - } - - layerPath := filepath.Join(d.info.HomeDir, rID) - tmpID := fmt.Sprintf("%s-removing", rID) - tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) - if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { - return err - } - if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { - logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) - } - - return nil + os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail + return hcsshim.DestroyLayer(d.info, rID) } -// Get returns the rootfs path for the id. 
This will mount the dir at its given path.
+// Get returns the rootfs path for the id. This will mount the dir at its given path.
 func (d *Driver) Get(id, mountLabel string) (string, error) {
-	panicIfUsedByLcow()
 	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
 	var dir string
@@ -386,12 +248,9 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 		return "", err
 	}
-	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
+	mountPath, err := hcsshim.LayerMountPath(d.info, rID)
 	if err != nil {
 		d.ctr.Decrement(rID)
-		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
-			logrus.Warnf("Failed to Unprepare %s: %s", id, err)
-		}
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
 		}
@@ -414,7 +273,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 // Put adds a new layer to the driver.
 func (d *Driver) Put(id string) error {
-	panicIfUsedByLcow()
 	logrus.Debugf("WindowsGraphDriver Put() id %s", id)
 	rID, err := d.resolveID(id)
@@ -425,15 +283,9 @@ func (d *Driver) Put(id string) error {
 		return nil
 	}
 	d.cacheMu.Lock()
-	_, exists := d.cache[rID]
 	delete(d.cache, rID)
 	d.cacheMu.Unlock()
-	// If the cache was not populated, then the layer was left unprepared and deactivated
-	if !exists {
-		return nil
-	}
-
 	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
 		return err
 	}
@@ -441,39 +293,14 @@ func (d *Driver) Put(id string) error {
 }
 // Cleanup ensures the information the driver stores is properly removed.
-// We use this opportunity to cleanup any -removing folders which may be
-// still left if the daemon was killed while it was removing a layer.
 func (d *Driver) Cleanup() error {
-	items, err := ioutil.ReadDir(d.info.HomeDir)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return err
-	}
-
-	// Note we don't return an error below - it's possible the files
-	// are locked. However, next time around after the daemon exits,
-	// we likely will be able to to cleanup successfully. Instead we log
-	// warnings if there are errors.
-	for _, item := range items {
-		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
-			if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
-				logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
-			} else {
-				logrus.Infof("Cleaned up %s", item.Name())
-			}
-		}
-	}
-
 	return nil
 }
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
 // The layer should be mounted when calling this function
-func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
-	panicIfUsedByLcow()
+func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return
@@ -508,9 +335,8 @@ func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
-// The layer should not be mounted when calling this function.
+// The layer should be mounted when calling this function.
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
-	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return nil, err
 	}
@@ -520,12 +346,13 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 		return nil, err
 	}
-	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+	// The change enumeration below assumes the layer is not mounted, so
+	// unprepare it here; the deferred call re-prepares it on the way out.
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
 		return nil, err
 	}
 	defer func() {
-		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
-			logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
+		if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
+			logrus.Warnf("Failed to re-Prepare %s: %s", rID, err)
 		}
 	}()
@@ -565,8 +392,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
 // The layer should not be mounted when calling this function
-func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
-	panicIfUsedByLcow()
+func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
 	var layerChain []string
 	if parent != "" {
 		rPId, err := d.resolveID(parent)
@@ -577,7 +403,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
 		if err != nil {
 			return 0, err
 		}
-		parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
+		parentPath, err := hcsshim.LayerMountPath(d.info, rPId)
 		if err != nil {
 			return 0, err
 		}
@@ -601,7 +427,6 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
-	panicIfUsedByLcow()
 	rPId, err := d.resolveID(parent)
 	if err != nil {
 		return
@@ -623,7 +448,6 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
 // Metadata returns custom driver information.
 func (d *Driver) Metadata(id string) (map[string]string, error) {
-	panicIfUsedByLcow()
 	m := make(map[string]string)
 	m["dir"] = d.dir(id)
 	return m, nil
@@ -659,7 +483,7 @@ func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
 }
 // exportLayer generates an archive from a layer based on the given ID.
-func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
+func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Archive, error) {
 	archive, w := io.Pipe()
 	go func() {
 		err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
@@ -681,48 +505,7 @@ func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
 	return archive, nil
 }
-// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
-// writes it to a backup stream, and also saves any files that will be mutated
-// by the import layer process to a backup location.
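In essence, the removed helper tees the restore stream with io.MultiWriter: while writing a file that Windows later mutates in place (the BCD entries listed in the old mutatedFiles map), it duplicates the bytes into a side backup so the original contents can still be served later. A simplified, self-contained sketch of that tee pattern (the function and parameter names are illustrative, not the helper's real API):

    package main

    import (
        "io"
        "os"
    )

    // teeWithBackup returns a writer that duplicates everything written to it
    // into dst and into a backup file at backupPath; the caller closes the
    // returned file once the stream is fully written.
    func teeWithBackup(dst io.Writer, backupPath string) (io.Writer, *os.File, error) {
        f, err := os.Create(backupPath)
        if err != nil {
            return nil, nil, err
        }
        return io.MultiWriter(dst, f), f, nil
    }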
-func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { - var bcdBackup *os.File - var bcdBackupWriter *winio.BackupFileWriter - if backupPath, ok := mutatedFiles[hdr.Name]; ok { - bcdBackup, err = os.Create(filepath.Join(root, backupPath)) - if err != nil { - return nil, err - } - defer func() { - cerr := bcdBackup.Close() - if err == nil { - err = cerr - } - }() - - bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) - defer func() { - cerr := bcdBackupWriter.Close() - if err == nil { - err = cerr - } - }() - - buf.Reset(io.MultiWriter(w, bcdBackupWriter)) - } else { - buf.Reset(w) - } - - defer func() { - ferr := buf.Flush() - if err == nil { - err = ferr - } - }() - - return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) -} - -func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { +func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) { t := tar.NewReader(r) hdr, err := t.Next() totalSize := int64(0) @@ -756,7 +539,30 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, if err != nil { return 0, err } - hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + buf.Reset(w) + + // Add the Hyper-V Virtual Machine group ACE to the security descriptor + // for TP5 so that Xenons can access all files. This is not necessary + // for post-TP5 builds. + if isTP5OrOlder() { + if sddl, ok := hdr.Winheaders["sd"]; ok { + var ace string + if hdr.Typeflag == tar.TypeDir { + ace = "(A;OICI;0x1200a9;;;S-1-5-83-0)" + } else { + ace = "(A;;0x1200a9;;;S-1-5-83-0)" + } + if hdr.Winheaders["sd"], ok = addAceToSddlDacl(sddl, ace); !ok { + logrus.Debugf("failed to add VM ACE to %s", sddl) + } + } + } + + hdr, err = backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) + ferr := buf.Flush() + if ferr != nil { + err = ferr + } totalSize += size } } @@ -766,75 +572,105 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, return totalSize, nil } -// importLayer adds a new layer to the tag and graph store based on the given data. -func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { - if !noreexec { - cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) - output := bytes.NewBuffer(nil) - cmd.Stdin = layerData - cmd.Stdout = output - cmd.Stderr = output - - if err = cmd.Start(); err != nil { - return +func addAceToSddlDacl(sddl, ace string) (string, bool) { + daclStart := strings.Index(sddl, "D:") + if daclStart < 0 { + return sddl, false + } + + dacl := sddl[daclStart:] + daclEnd := strings.Index(dacl, "S:") + if daclEnd < 0 { + daclEnd = len(dacl) + } + dacl = dacl[:daclEnd] + + if strings.Contains(dacl, ace) { + return sddl, true + } + + i := 2 + for i+1 < len(dacl) { + if dacl[i] != '(' { + return sddl, false } - if err = cmd.Wait(); err != nil { - return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + if dacl[i+1] == 'A' { + break } - return strconv.ParseInt(output.String(), 10, 64) + i += 2 + for p := 1; i < len(dacl) && p > 0; i++ { + if dacl[i] == '(' { + p++ + } else if dacl[i] == ')' { + p-- + } + } } - return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) 
-} -// writeLayerReexec is the re-exec entry point for writing a layer from a tar file -func writeLayerReexec() { - size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) - if err != nil { - fmt.Fprint(os.Stderr, err) - os.Exit(1) - } - fmt.Fprint(os.Stdout, size) + return sddl[:daclStart+i] + ace + sddl[daclStart+i:], true } -// writeLayer writes a layer from a tar file. -func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { - err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) - if err != nil { - return 0, err - } - if noreexec { - defer func() { - if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { - // This should never happen, but just in case when in debugging mode. - // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale. - panic("Failed to disabled process privileges while in non re-exec mode") - } - }() +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) { + cmd := reexec.Command(append([]string{"storage-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return } - info := hcsshim.DriverInfo{ - Flavour: filterDriver, - HomeDir: home, + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) } - w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) - if err != nil { - return 0, err - } + return strconv.ParseInt(output.String(), 10, 64) +} - size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id)) - if err != nil { - return 0, err - } +// writeLayer is the re-exec entry point for writing a layer from a tar file +func writeLayer() { + home := os.Args[1] + id := os.Args[2] + parentLayerPaths := os.Args[3:] + + err := func() error { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return err + } + + info := hcsshim.DriverInfo{ + Flavour: filterDriver, + HomeDir: home, + } + + w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) + if err != nil { + return err + } + + size, err := writeLayerFromTar(os.Stdin, w) + if err != nil { + return err + } + + err = w.Close() + if err != nil { + return err + } + + fmt.Fprint(os.Stdout, size) + return nil + }() - err = w.Close() if err != nil { - return 0, err + fmt.Fprint(os.Stderr, err) + os.Exit(1) } - - return size, nil } // resolveID computes the layerID information based on the given id. @@ -850,7 +686,11 @@ func (d *Driver) resolveID(id string) (string, error) { // setID stores the layerId in disk. func (d *Driver) setID(id, altID string) error { - return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + if err != nil { + return err + } + return nil } // getLayerChain returns the layer chain information. 
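As a usage note for addAceToSddlDacl above, with an illustrative SDDL string (not taken from a real layer): the ACE is inserted immediately before the first access-allowed ACE in the DACL, and a string whose DACL already contains the ACE is returned unchanged.

    // Illustrative input/output for addAceToSddlDacl; 0x1200a9 is the
    // read+execute access mask used by the TP5 code path above.
    sddl := "D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)"
    ace := "(A;;0x1200a9;;;S-1-5-83-0)"
    out, ok := addAceToSddlDacl(sddl, ace)
    // ok  == true
    // out == "D:(A;;0x1200a9;;;S-1-5-83-0)(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)"
    // Calling it again with the same ACE returns out unchanged, still true.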
@@ -893,23 +733,17 @@ type fileGetCloserWithBackupPrivileges struct { } func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { - if backupPath, ok := mutatedFiles[filename]; ok { - return os.Open(filepath.Join(fg.path, backupPath)) - } - var f *os.File // Open the file while holding the Windows backup privilege. This ensures that the // file can be opened even if the caller does not actually have access to it according - // to the security descriptor. Also use sequential file access to avoid depleting the - // standby list - Microsoft VSO Bug Tracker #9900466 + // to the security descriptor. err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { path := longpath.AddPrefix(filepath.Join(fg.path, filename)) - p, err := windows.UTF16FromString(path) + p, err := syscall.UTF16FromString(path) if err != nil { return err } - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0) + h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) if err != nil { return &os.PathError{Op: "open", Path: path, Err: err} } @@ -923,10 +757,19 @@ func (fg *fileGetCloserWithBackupPrivileges) Close() error { return nil } +type fileGetDestroyCloser struct { + storage.FileGetter + path string +} + +func (f *fileGetDestroyCloser) Close() error { + // TODO: activate layers and release here? + return os.RemoveAll(f.path) +} + // DiffGetter returns a FileGetCloser that can read files from the directory that // contains files for the layer differences. Used for direct access for tar-split. func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - panicIfUsedByLcow() id, err := d.resolveID(id) if err != nil { return nil, err @@ -934,32 +777,3 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil } - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} - -type storageOptions struct { - size uint64 -} - -func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { - options := storageOptions{} - - // Read size to change the block device size per container. 
- for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - options.size = uint64(size) - default: - return nil, fmt.Errorf("Unknown storage option: %s", key) - } - } - return &options, nil -} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index 8c8e7d671838..c9860ec28ae0 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -10,6 +10,7 @@ import ( "strconv" "strings" "sync" + "syscall" "time" "github.com/containers/storage/drivers" @@ -20,7 +21,6 @@ import ( "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) type zfsOptions struct { @@ -100,14 +100,6 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) - } - if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { - return nil, fmt.Errorf("Failed to create '%s': %v", base, err) - } - if err := mount.MakePrivate(base); err != nil { return nil, err } @@ -142,8 +134,8 @@ func parseOptions(opt []string) (zfsOptions, error) { } func lookupZfsDataset(rootdir string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(rootdir, &stat); err != nil { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) } wantedDev := stat.Dev @@ -153,7 +145,7 @@ func lookupZfsDataset(rootdir string) (string, error) { return "", err } for _, m := range mounts { - if err := unix.Stat(m.Mountpoint, &stat); err != nil { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) continue // may fail on fuse file systems } @@ -221,10 +213,7 @@ func (d *Driver) Status() [][2]string { // Metadata returns image/container metadata related to graph driver func (d *Driver) Metadata(id string) (map[string]string, error) { - return map[string]string{ - "Mountpoint": d.mountPath(id), - "Dataset": d.zfsPath(id), - }, nil + return nil, nil } func (d *Driver) cloneFilesystem(name, parentName string) error { @@ -259,17 +248,12 @@ func (d *Driver) mountPath(id string) string { // CreateReadWrite creates a layer that is writable for use as a container // file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) } // Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
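Conceptually, each layer is a writable ZFS clone of a snapshot of its parent's dataset, which is what makes Create cheap and copy-on-write. A minimal sketch of that snapshot-then-clone sequence, assuming the github.com/mistifyio/go-zfs API this driver builds on (pool and dataset names here are hypothetical):

    package main

    import zfs "github.com/mistifyio/go-zfs"

    // cloneLayer snapshots the parent layer's dataset and clones the snapshot
    // into a new, independently writable dataset for the child layer.
    func cloneLayer(parent, id string) error {
        p, err := zfs.GetDataset("tank/graph/" + parent)
        if err != nil {
            return err
        }
        snap, err := p.Snapshot(id, false) // creates tank/graph/<parent>@<id>
        if err != nil {
            return err
        }
        _, err = snap.Clone("tank/graph/"+id, nil)
        return err
    }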
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - +func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error { err := d.create(id, parent, storageOpt) if err == nil { return nil @@ -407,20 +391,22 @@ func (d *Driver) Put(id string) error { logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) - if err := mount.Unmount(mountpoint); err != nil { + err = mount.Unmount(mountpoint) + if err != nil { return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) } - return nil + return err } // Exists checks to see if the cache entry exists for the given id. func (d *Driver) Exists(id string) bool { d.Lock() defer d.Unlock() - return d.filesystemsCache[d.zfsPath(id)] + return d.filesystemsCache[d.zfsPath(id)] == true } // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - return nil + var imageStores []string + return imageStores } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go index 69c0448d3423..ade71b15d700 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -3,16 +3,16 @@ package zfs import ( "fmt" "strings" + "syscall" "github.com/containers/storage/drivers" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) func checkRootdirFs(rootdir string) error { - var buf unix.Statfs_t - if err := unix.Statfs(rootdir, &buf); err != nil { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go index da298047d55f..92b387569015 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -2,16 +2,16 @@ package zfs import ( "fmt" + "syscall" "github.com/containers/storage/drivers" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) func checkRootdirFs(rootdir string) error { - var buf unix.Statfs_t - if err := unix.Statfs(rootdir, &buf); err != nil { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go index 2383bf3bf36b..ca595638105c 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go @@ -22,23 +22,24 @@ import ( "github.com/containers/storage/drivers" "github.com/pkg/errors" - "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func checkRootdirFs(rootdir string) error { cs := C.CString(filepath.Dir(rootdir)) - defer C.free(unsafe.Pointer(cs)) buf := C.getstatfs(cs) - defer C.free(unsafe.Pointer(buf)) // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
]
 	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || (buf.f_basetype[3] != 0) {
-		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		C.free(unsafe.Pointer(buf))
+		// Without the deferred frees of the newer code, cs must also be
+		// released on this early-return path to avoid leaking the C string.
+		C.free(unsafe.Pointer(cs))
 		return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
 	}
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
 	return nil
 }
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
deleted file mode 100644
index bed6f8cdc724..000000000000
--- a/vendor/github.com/containers/storage/errors.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package storage
-
-import (
-	"errors"
-)
-
-var (
-	// ErrContainerUnknown indicates that there was no container with the specified name or ID.
-	ErrContainerUnknown = errors.New("container not known")
-	// ErrImageUnknown indicates that there was no image with the specified name or ID.
-	ErrImageUnknown = errors.New("image not known")
-	// ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer.
-	ErrParentUnknown = errors.New("parent of layer not known")
-	// ErrLayerUnknown indicates that there was no layer with the specified name or ID.
-	ErrLayerUnknown = errors.New("layer not known")
-	// ErrLoadError indicates that there was an initialization error.
-	ErrLoadError = errors.New("error loading storage metadata")
-	// ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used.
-	ErrDuplicateID = errors.New("that ID is already in use")
-	// ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used.
-	ErrDuplicateName = errors.New("that name is already in use")
-	// ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer.
-	ErrParentIsContainer = errors.New("would-be parent layer is a container")
-	// ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container.
-	ErrNotAContainer = errors.New("identifier is not a container")
-	// ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image.
-	ErrNotAnImage = errors.New("identifier is not an image")
-	// ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer.
-	ErrNotALayer = errors.New("identifier is not a layer")
-	// ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist.
-	ErrNotAnID = errors.New("identifier is not a layer, image, or container")
-	// ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children.
-	ErrLayerHasChildren = errors.New("layer has children")
-	// ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer.
-	ErrLayerUsedByImage = errors.New("layer is in use by an image")
-	// ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer.
-	ErrLayerUsedByContainer = errors.New("layer is in use by a container")
-	// ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image.
-	ErrImageUsedByContainer = errors.New("image is in use by a container")
-	// ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information.
- ErrIncompleteOptions = errors.New("missing necessary StoreOptions") - // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. - ErrSizeUnknown = errors.New("size is not known") - // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. - ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") - // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write. - ErrLockReadOnly = errors.New("lock is not a read-write lock") - // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. - ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images") - // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. - ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers") - // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. - ErrInvalidBigDataName = errors.New("not a valid name for a big data item") - // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. - ErrDigestUnknown = errors.New("could not compute digest of item") -) diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 8e8f11491369..fe17f63178b0 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -10,15 +10,12 @@ import ( "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" - digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) -const ( - // ImageDigestBigDataKey is the name of the big data item whose - // contents we consider useful for computing a "digest" of the - // image, by which we can locate the image later. - ImageDigestBigDataKey = "manifest" +var ( + // ErrImageUnknown indicates that there was no image with the specified name or ID + ErrImageUnknown = errors.New("image not known") ) // An Image is a reference to a layer and an associated metadata string. @@ -32,10 +29,9 @@ type Image struct { // unique among images. Names []string `json:"names,omitempty"` - // TopLayer is the ID of the topmost layer of the image itself, if the - // image contains one or more layers. Multiple images can refer to the - // same top layer. - TopLayer string `json:"layer,omitempty"` + // TopLayer is the ID of the topmost layer of the image itself. + // Multiple images can refer to the same top layer. + TopLayer string `json:"layer"` // Metadata is data we keep for the convenience of the caller. It is not // expected to be large, since it is kept in memory. @@ -50,10 +46,6 @@ type Image struct { // that has been stored, if they're known. BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` - // BigDataDigests maps the names in BigDataNames to the digests of the - // data that has been stored, if they're known. - BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` - // Created is the datestamp for when this image was created. 
Older // versions of the library did not track this information, so callers // will likely want to use the IsZero() method to verify that a value @@ -81,10 +73,6 @@ type ROImageStore interface { // Images returns a slice enumerating the known images. Images() ([]Image, error) - - // Images returns a slice enumerating the images which have a big data - // item with the name ImageDigestBigDataKey and the specified digest. - ByDigest(d digest.Digest) ([]*Image, error) } // ImageStore provides bookkeeping for information about Images. @@ -118,7 +106,6 @@ type imageStore struct { idindex *truncindex.TruncIndex byid map[string]*Image byname map[string]*Image - bydigest map[digest.Digest][]*Image } func (r *imageStore) Images() ([]Image, error) { @@ -152,9 +139,7 @@ func (r *imageStore) Load() error { idlist := []string{} ids := make(map[string]*Image) names := make(map[string]*Image) - digests := make(map[digest.Digest][]*Image) if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(images)) for n, image := range images { ids[image.ID] = images[n] idlist = append(idlist, image.ID) @@ -165,19 +150,15 @@ func (r *imageStore) Load() error { } names[name] = images[n] } - if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { - digests[digest] = append(digests[digest], images[n]) - } } } if shouldSave && !r.IsReadWrite() { - return ErrDuplicateImageNames + return errors.New("image store assigns the same name to multiple images") } r.images = images r.idindex = truncindex.NewTruncIndex(idlist) r.byid = ids r.byname = names - r.bydigest = digests if shouldSave { return r.Save() } @@ -216,7 +197,6 @@ func newImageStore(dir string) (ImageStore, error) { images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), - bydigest: make(map[digest.Digest][]*Image), } if err := istore.Load(); err != nil { return nil, err @@ -237,7 +217,6 @@ func newROImageStore(dir string) (ROImageStore, error) { images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), - bydigest: make(map[digest.Digest][]*Image), } if err := istore.Load(); err != nil { return nil, err @@ -277,9 +256,6 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { if !ok { return ErrImageUnknown } - if image.Flags == nil { - image.Flags = make(map[string]interface{}) - } image.Flags[flag] = value return r.Save() } @@ -299,7 +275,6 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c if _, idInUse := r.byid[id]; idInUse { return nil, ErrDuplicateID } - names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { return nil, ErrDuplicateName @@ -310,15 +285,14 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c } if err == nil { image = &Image{ - ID: id, - Names: names, - TopLayer: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: created, - Flags: make(map[string]interface{}), + ID: id, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + Created: created, + Flags: make(map[string]interface{}), } r.images = append(r.images, image) r.idindex.Add(id) @@ -357,7 +331,6 @@ func (r *imageStore) SetNames(id string, names []string) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name 
assignments at %q", r.imagespath()) } - names = dedupeNames(names) if image, ok := r.lookup(id); ok { for _, name := range image.Names { delete(r.byname, name) @@ -402,17 +375,6 @@ func (r *imageStore) Delete(id string) error { r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) } } - if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { - // remove the image from the digest-based index - if list, ok := r.bydigest[digest]; ok { - prunedList := imageSliceWithoutValue(list, image) - if len(prunedList) == 0 { - delete(r.bydigest, digest) - } else { - r.bydigest[digest] = prunedList - } - } - } if err := r.Save(); err != nil { return err } @@ -441,17 +403,7 @@ func (r *imageStore) Exists(id string) bool { return ok } -func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { - if images, ok := r.bydigest[d]; ok { - return images, nil - } - return nil, ErrImageUnknown -} - func (r *imageStore) BigData(id, key string) ([]byte, error) { - if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") - } image, ok := r.lookup(id) if !ok { return nil, ErrImageUnknown @@ -460,61 +412,16 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) { } func (r *imageStore) BigDataSize(id, key string) (int64, error) { - if key == "" { - return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") - } image, ok := r.lookup(id) if !ok { return -1, ErrImageUnknown } - if image.BigDataSizes == nil { - image.BigDataSizes = make(map[string]int64) - } if size, ok := image.BigDataSizes[key]; ok { return size, nil } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - image, ok := r.lookup(id) - if !ok { - return -1, ErrImageUnknown - } - if size, ok := image.BigDataSizes[key]; ok { - return size, nil - } - } - } return -1, ErrSizeUnknown } -func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { - if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") - } - image, ok := r.lookup(id) - if !ok { - return "", ErrImageUnknown - } - if image.BigDataDigests == nil { - image.BigDataDigests = make(map[string]digest.Digest) - } - if d, ok := image.BigDataDigests[key]; ok { - return d, nil - } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - image, ok := r.lookup(id) - if !ok { - return "", ErrImageUnknown - } - if d, ok := image.BigDataDigests[key]; ok { - return d, nil - } - } - } - return "", ErrDigestUnknown -} - func (r *imageStore) BigDataNames(id string) ([]string, error) { image, ok := r.lookup(id) if !ok { @@ -523,21 +430,7 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) { return image.BigDataNames, nil } -func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { - modified := make([]*Image, 0, len(slice)) - for _, v := range slice { - if v == value { - continue - } - modified = append(modified, v) - } - return modified -} - func (r *imageStore) SetBigData(id, key string, data []byte) error { - if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") - } if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) } @@ -550,54 +443,23 @@ func (r *imageStore) SetBigData(id, key string, data []byte) 
error { } err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) if err == nil { + add := true save := false - if image.BigDataSizes == nil { - image.BigDataSizes = make(map[string]int64) - } - oldSize, sizeOk := image.BigDataSizes[key] + oldSize, ok := image.BigDataSizes[key] image.BigDataSizes[key] = int64(len(data)) - if image.BigDataDigests == nil { - image.BigDataDigests = make(map[string]digest.Digest) - } - oldDigest, digestOk := image.BigDataDigests[key] - newDigest := digest.Canonical.FromBytes(data) - image.BigDataDigests[key] = newDigest - if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { + if !ok || oldSize != image.BigDataSizes[key] { save = true } - addName := true for _, name := range image.BigDataNames { if name == key { - addName = false + add = false break } } - if addName { + if add { image.BigDataNames = append(image.BigDataNames, key) save = true } - if key == ImageDigestBigDataKey { - if oldDigest != "" && oldDigest != newDigest { - // remove the image from the list of images in the digest-based - // index which corresponds to the old digest for this item - if list, ok := r.bydigest[oldDigest]; ok { - prunedList := imageSliceWithoutValue(list, image) - if len(prunedList) == 0 { - delete(r.bydigest, oldDigest) - } else { - r.bydigest[oldDigest] = prunedList - } - } - } - // add the image to the list of images in the digest-based index which - // corresponds to the new digest for this item, unless it's already there - list := r.bydigest[newDigest] - if len(list) == len(imageSliceWithoutValue(list, image)) { - // the list isn't shortened by trying to prune this image from it, - // so it's not in there yet - r.bydigest[newDigest] = append(list, image) - } - } if save { err = r.Save() } @@ -609,7 +471,7 @@ func (r *imageStore) Wipe() error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) } - ids := make([]string, 0, len(r.byid)) + ids := []string{} for id := range r.byid { ids = append(ids, id) } diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go deleted file mode 100644 index 59df7805efb6..000000000000 --- a/vendor/github.com/containers/storage/images_ffjson.go +++ /dev/null @@ -1,1150 +0,0 @@ -// Code generated by ffjson . DO NOT EDIT. 
-// source: images.go - -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/opencontainers/go-digest" - fflib "github.com/pquerna/ffjson/fflib/v1" -) - -// MarshalJSON marshal bytes to json - template -func (j *Image) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{ "id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteByte(',') - if len(j.Names) != 0 { - buf.WriteString(`"names":`) - if j.Names != nil { - buf.WriteString(`[`) - for i, v := range j.Names { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.TopLayer) != 0 { - buf.WriteString(`"layer":`) - fflib.WriteJsonString(buf, string(j.TopLayer)) - buf.WriteByte(',') - } - if len(j.Metadata) != 0 { - buf.WriteString(`"metadata":`) - fflib.WriteJsonString(buf, string(j.Metadata)) - buf.WriteByte(',') - } - if len(j.BigDataNames) != 0 { - buf.WriteString(`"big-data-names":`) - if j.BigDataNames != nil { - buf.WriteString(`[`) - for i, v := range j.BigDataNames { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.BigDataSizes) != 0 { - if j.BigDataSizes == nil { - buf.WriteString(`"big-data-sizes":null`) - } else { - buf.WriteString(`"big-data-sizes":{ `) - for key, value := range j.BigDataSizes { - fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.FormatBits2(buf, uint64(value), 10, value < 0) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if len(j.BigDataDigests) != 0 { - if j.BigDataDigests == nil { - buf.WriteString(`"big-data-digests":null`) - } else { - buf.WriteString(`"big-data-digests":{ `) - for key, value := range j.BigDataDigests { - fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.WriteJsonString(buf, string(value)) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if true { - buf.WriteString(`"created":`) - - { - - obj, err = j.Created.MarshalJSON() - if err != nil { - return err - } - buf.Write(obj) - - } - buf.WriteByte(',') - } - if len(j.Flags) != 0 { - buf.WriteString(`"flags":`) - /* Falling back. 
type=map[string]interface {} kind=map */ - err = buf.Encode(j.Flags) - if err != nil { - return err - } - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - return nil -} - -const ( - ffjtImagebase = iota - ffjtImagenosuchkey - - ffjtImageID - - ffjtImageNames - - ffjtImageTopLayer - - ffjtImageMetadata - - ffjtImageBigDataNames - - ffjtImageBigDataSizes - - ffjtImageBigDataDigests - - ffjtImageCreated - - ffjtImageFlags -) - -var ffjKeyImageID = []byte("id") - -var ffjKeyImageNames = []byte("names") - -var ffjKeyImageTopLayer = []byte("layer") - -var ffjKeyImageMetadata = []byte("metadata") - -var ffjKeyImageBigDataNames = []byte("big-data-names") - -var ffjKeyImageBigDataSizes = []byte("big-data-sizes") - -var ffjKeyImageBigDataDigests = []byte("big-data-digests") - -var ffjKeyImageCreated = []byte("created") - -var ffjKeyImageFlags = []byte("flags") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *Image) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *Image) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtImagebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtImagenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'b': - - if bytes.Equal(ffjKeyImageBigDataNames, kn) { - currentKey = ffjtImageBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyImageBigDataSizes, kn) { - currentKey = ffjtImageBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyImageBigDataDigests, kn) { - currentKey = ffjtImageBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'c': - - if bytes.Equal(ffjKeyImageCreated, kn) { - currentKey = ffjtImageCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'f': - - if bytes.Equal(ffjKeyImageFlags, kn) { - currentKey = ffjtImageFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeyImageID, kn) { - currentKey = ffjtImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'l': - - if bytes.Equal(ffjKeyImageTopLayer, kn) { - currentKey = ffjtImageTopLayer - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'm': - - if bytes.Equal(ffjKeyImageMetadata, kn) { - currentKey = ffjtImageMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'n': - - if bytes.Equal(ffjKeyImageNames, kn) { - currentKey = ffjtImageNames - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyImageFlags, kn) { - currentKey = ffjtImageFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageCreated, kn) { - currentKey = ffjtImageCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageBigDataDigests, kn) { - currentKey = ffjtImageBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageBigDataSizes, kn) { - currentKey = ffjtImageBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageBigDataNames, kn) { - currentKey = ffjtImageBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageMetadata, kn) { - currentKey = ffjtImageMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) { - currentKey = ffjtImageTopLayer - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageNames, kn) { - currentKey = ffjtImageNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) { - currentKey = ffjtImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtImagenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtImageID: - goto handle_ID - - case ffjtImageNames: - goto handle_Names - - case ffjtImageTopLayer: - goto handle_TopLayer - - case ffjtImageMetadata: - goto handle_Metadata - - case ffjtImageBigDataNames: - goto handle_BigDataNames - - 
case ffjtImageBigDataSizes: - goto handle_BigDataSizes - - case ffjtImageBigDataDigests: - goto handle_BigDataDigests - - case ffjtImageCreated: - goto handle_Created - - case ffjtImageFlags: - goto handle_Flags - - case ffjtImagenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Names: - - /* handler: j.Names type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Names = nil - } else { - - j.Names = []string{} - - wantVal := true - - for { - - var tmpJNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJNames = string(string(outBuf)) - - } - } - - j.Names = append(j.Names, tmpJNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_TopLayer: - - /* handler: j.TopLayer type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.TopLayer = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Metadata: - - /* handler: j.Metadata type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Metadata = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataNames: - - /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataNames = nil - } else { - - j.BigDataNames = []string{} - - wantVal := true - - for { - - var tmpJBigDataNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { 
- if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataNames = string(string(outBuf)) - - } - } - - j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataSizes: - - /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataSizes = nil - } else { - - j.BigDataSizes = make(map[string]int64, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataSizes int64 - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - tmpJBigDataSizes = int64(tval) - - } - } - - j.BigDataSizes[k] = tmpJBigDataSizes - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataDigests: - - /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataDigests = nil - } else { - - j.BigDataDigests = make(map[string]digest.Digest, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataDigests digest.Digest - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataDigests = digest.Digest(string(outBuf)) - - } - } - - j.BigDataDigests[k] = tmpJBigDataDigests - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Created: - - /* handler: j.Created type=time.Time kind=struct quoted=false*/ - - { - if tok == fflib.FFTok_null { - - } else { - - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = j.Created.UnmarshalJSON(tbuf) - if err != nil { - return fs.WrapErr(err) - } - } - state = fflib.FFParse_after_value - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Flags: - - /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Flags = nil - } else { - - j.Flags = make(map[string]interface{}, 0) - - wantVal := true - - for { - - var k string - - var tmpJFlags interface{} - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ - - { - /* Falling back. 
type=interface {} kind=interface */ - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = json.Unmarshal(tbuf, &tmpJFlags) - if err != nil { - return fs.WrapErr(err) - } - } - - j.Flags[k] = tmpJFlags - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *imageStore) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *imageStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtimageStorebase = iota - ffjtimageStorenosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *imageStore) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *imageStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtimageStorebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
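imageStore itself, like layerStore and simpleGetCloser later in this file, has no exported fields, so its generated MarshalJSONBuf writes a literal `{}` and its lexer can only skip unknown keys. encoding/json produces the same result by reflection, which is part of why this generated file can be dropped safely. A small sketch with a hypothetical struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// store is a hypothetical stand-in: only unexported fields, like the real
// store types in this package.
type store struct {
	byid map[string]int
}

func main() {
	b, err := json.Marshal(&store{byid: map[string]int{"a": 1}})
	fmt.Println(string(b), err) // {} <nil>: unexported fields are invisible
}
```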
- currentKey = ffjtimageStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtimageStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtimageStorenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index f51406a020e0..7cdc2e25245a 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -27,6 +27,13 @@ const ( compressionFlag = "diff-compression" ) +var ( + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer + ErrParentUnknown = errors.New("parent of layer not known") + // ErrLayerUnknown indicates that there was no layer with the specified name or ID + ErrLayerUnknown = errors.New("layer not known") +) + // A Layer is a record of a copy-on-write layer that's stored by the lower // level graph driver. type Layer struct { @@ -184,7 +191,7 @@ type LayerStore interface { CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) // Put combines the functions of CreateWithFlags and ApplyDiff. - Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error) // SetNames replaces the list of names associated with a layer with the // supplied values. @@ -206,7 +213,7 @@ type LayerStore interface { // ApplyDiff reads a tarstream which was created by a previous call to Diff and // applies its changes to a specified layer. 
- ApplyDiff(to string, diff io.Reader) (int64, error) + ApplyDiff(to string, diff archive.Reader) (int64, error) } type layerStore struct { @@ -254,7 +261,6 @@ func (r *layerStore) Load() error { compressedsums := make(map[digest.Digest][]string) uncompressedsums := make(map[digest.Digest][]string) if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(layers)) for n, layer := range layers { ids[layer.ID] = layers[n] idlist = append(idlist, layer.ID) @@ -274,7 +280,7 @@ func (r *layerStore) Load() error { } } if shouldSave && !r.IsReadWrite() { - return ErrDuplicateLayerNames + return errors.New("layer store assigns the same name to multiple layers") } mpath := r.mountspath() data, err = ioutil.ReadFile(mpath) @@ -306,9 +312,6 @@ func (r *layerStore) Load() error { // actually delete. if r.IsReadWrite() { for _, layer := range r.layers { - if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) - } if cleanup, ok := layer.Flags[incompleteFlag]; ok { if b, ok := cleanup.(bool); ok && b { err = r.Delete(layer.ID) @@ -342,7 +345,7 @@ func (r *layerStore) Save() error { if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { return err } - mounts := make([]layerMountPoint, 0, len(r.layers)) + mounts := []layerMountPoint{} for _, layer := range r.layers { if layer.MountPoint != "" && layer.MountCount > 0 { mounts = append(mounts, layerMountPoint{ @@ -459,9 +462,6 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { if !ok { return ErrLayerUnknown } - if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) - } layer.Flags[flag] = value return r.Save() } @@ -470,7 +470,7 @@ func (r *layerStore) Status() ([][2]string, error) { return r.driver.Status(), nil } -func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) { if !r.IsReadWrite() { return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) } @@ -497,20 +497,15 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o if _, idInUse := r.byid[id]; idInUse { return nil, -1, ErrDuplicateID } - names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { return nil, -1, ErrDuplicateName } } - opts := drivers.CreateOpts{ - MountLabel: mountLabel, - StorageOpt: options, - } if writeable { - err = r.driver.CreateReadWrite(id, parent, &opts) + err = r.driver.CreateReadWrite(id, parent, mountLabel, options) } else { - err = r.driver.Create(id, parent, &opts) + err = r.driver.Create(id, parent, mountLabel, options) } if err == nil { layer = &Layer{ @@ -634,7 +629,6 @@ func (r *layerStore) SetNames(id string, names []string) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) } - names = dedupeNames(names) if layer, ok := r.lookup(id); ok { for _, name := range layer.Names { delete(r.byname, name) @@ -740,7 +734,7 @@ func (r *layerStore) Wipe() error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) } - ids := 
make([]string, 0, len(r.byid)) + ids := []string{} for id := range r.byid { ids = append(ids, id) } @@ -913,7 +907,7 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) { return r.driver.DiffSize(to, from) } -func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { +func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) { if !r.IsReadWrite() { return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) } diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go deleted file mode 100644 index 8bec40e17a81..000000000000 --- a/vendor/github.com/containers/storage/layers_ffjson.go +++ /dev/null @@ -1,1713 +0,0 @@ -// Code generated by ffjson . DO NOT EDIT. -// source: layers.go - -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/containers/storage/pkg/archive" - "github.com/opencontainers/go-digest" - fflib "github.com/pquerna/ffjson/fflib/v1" -) - -// MarshalJSON marshal bytes to json - template -func (j *DiffOptions) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - if j.Compression != nil { - buf.WriteString(`{"Compression":`) - fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0) - } else { - buf.WriteString(`{"Compression":null`) - } - buf.WriteByte('}') - return nil -} - -const ( - ffjtDiffOptionsbase = iota - ffjtDiffOptionsnosuchkey - - ffjtDiffOptionsCompression -) - -var ffjKeyDiffOptionsCompression = []byte("Compression") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *DiffOptions) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtDiffOptionsbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
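Two things are worth noting in the layers.go hunks above: Put and ApplyDiff move from io.Reader back to the package's archive.Reader, and ErrParentUnknown / ErrLayerUnknown become package-level sentinel errors. A hedged sketch of how such sentinels are typically consumed; lookupLayer is a hypothetical stand-in, and plain equality is the idiom of this era, which predates errors.Is:

```go
package main

import (
	"errors"
	"fmt"
)

// Local copies of the sentinels declared in layers.go above.
var (
	ErrLayerUnknown  = errors.New("layer not known")
	ErrParentUnknown = errors.New("parent of layer not known")
)

// lookupLayer is a hypothetical helper standing in for the store's lookup.
func lookupLayer(id string) error {
	if id == "" {
		return ErrLayerUnknown
	}
	return nil
}

func main() {
	if err := lookupLayer(""); err == ErrLayerUnknown {
		fmt.Println("no such layer: treat as a cache miss, not a failure")
	}
	_ = ErrParentUnknown // consumed the same way by parent traversals
}
```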
- currentKey = ffjtDiffOptionsnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'C': - - if bytes.Equal(ffjKeyDiffOptionsCompression, kn) { - currentKey = ffjtDiffOptionsCompression - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) { - currentKey = ffjtDiffOptionsCompression - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtDiffOptionsnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtDiffOptionsCompression: - goto handle_Compression - - case ffjtDiffOptionsnosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_Compression: - - /* handler: j.Compression type=archive.Compression kind=int quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - j.Compression = nil - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - ttypval := archive.Compression(tval) - j.Compression = &ttypval - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *Layer) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{ "id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteByte(',') - if len(j.Names) != 0 { - buf.WriteString(`"names":`) - if j.Names != nil { - buf.WriteString(`[`) - for i, v := range j.Names { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.Parent) != 0 { - buf.WriteString(`"parent":`) - fflib.WriteJsonString(buf, string(j.Parent)) - buf.WriteByte(',') - } - if len(j.Metadata) != 0 { - buf.WriteString(`"metadata":`) - fflib.WriteJsonString(buf, string(j.Metadata)) - buf.WriteByte(',') - } - if len(j.MountLabel) != 0 { - 
buf.WriteString(`"mountlabel":`) - fflib.WriteJsonString(buf, string(j.MountLabel)) - buf.WriteByte(',') - } - if true { - buf.WriteString(`"created":`) - - { - - obj, err = j.Created.MarshalJSON() - if err != nil { - return err - } - buf.Write(obj) - - } - buf.WriteByte(',') - } - if len(j.CompressedDigest) != 0 { - buf.WriteString(`"compressed-diff-digest":`) - fflib.WriteJsonString(buf, string(j.CompressedDigest)) - buf.WriteByte(',') - } - if j.CompressedSize != 0 { - buf.WriteString(`"compressed-size":`) - fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0) - buf.WriteByte(',') - } - if len(j.UncompressedDigest) != 0 { - buf.WriteString(`"diff-digest":`) - fflib.WriteJsonString(buf, string(j.UncompressedDigest)) - buf.WriteByte(',') - } - if j.UncompressedSize != 0 { - buf.WriteString(`"diff-size":`) - fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0) - buf.WriteByte(',') - } - if j.CompressionType != 0 { - buf.WriteString(`"compression":`) - fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0) - buf.WriteByte(',') - } - if len(j.Flags) != 0 { - buf.WriteString(`"flags":`) - /* Falling back. type=map[string]interface {} kind=map */ - err = buf.Encode(j.Flags) - if err != nil { - return err - } - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - return nil -} - -const ( - ffjtLayerbase = iota - ffjtLayernosuchkey - - ffjtLayerID - - ffjtLayerNames - - ffjtLayerParent - - ffjtLayerMetadata - - ffjtLayerMountLabel - - ffjtLayerCreated - - ffjtLayerCompressedDigest - - ffjtLayerCompressedSize - - ffjtLayerUncompressedDigest - - ffjtLayerUncompressedSize - - ffjtLayerCompressionType - - ffjtLayerFlags -) - -var ffjKeyLayerID = []byte("id") - -var ffjKeyLayerNames = []byte("names") - -var ffjKeyLayerParent = []byte("parent") - -var ffjKeyLayerMetadata = []byte("metadata") - -var ffjKeyLayerMountLabel = []byte("mountlabel") - -var ffjKeyLayerCreated = []byte("created") - -var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest") - -var ffjKeyLayerCompressedSize = []byte("compressed-size") - -var ffjKeyLayerUncompressedDigest = []byte("diff-digest") - -var ffjKeyLayerUncompressedSize = []byte("diff-size") - -var ffjKeyLayerCompressionType = []byte("compression") - -var ffjKeyLayerFlags = []byte("flags") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *Layer) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtLayerbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. 
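The deleted Layer.MarshalJSONBuf above writes each populated field followed by a comma, then calls buf.Rewind(1) to drop the trailing comma before the closing brace. For the optional fields this is behaviorally what encoding/json's omitempty tags produce; a short sketch using a subset of the keys visible above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// layer carries a subset of the keys seen in the deleted generator output.
type layer struct {
	ID       string   `json:"id"`
	Names    []string `json:"names,omitempty"`
	Parent   string   `json:"parent,omitempty"`
	Metadata string   `json:"metadata,omitempty"`
}

func main() {
	b, _ := json.Marshal(layer{ID: "abc"})
	fmt.Println(string(b)) // {"id":"abc"}: empty fields dropped, no comma fixup needed
}
```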
- if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. - currentKey = ffjtLayernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'c': - - if bytes.Equal(ffjKeyLayerCreated, kn) { - currentKey = ffjtLayerCreated - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) { - currentKey = ffjtLayerCompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) { - currentKey = ffjtLayerCompressedSize - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerCompressionType, kn) { - currentKey = ffjtLayerCompressionType - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'd': - - if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) { - currentKey = ffjtLayerUncompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) { - currentKey = ffjtLayerUncompressedSize - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'f': - - if bytes.Equal(ffjKeyLayerFlags, kn) { - currentKey = ffjtLayerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeyLayerID, kn) { - currentKey = ffjtLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'm': - - if bytes.Equal(ffjKeyLayerMetadata, kn) { - currentKey = ffjtLayerMetadata - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerMountLabel, kn) { - currentKey = ffjtLayerMountLabel - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'n': - - if bytes.Equal(ffjKeyLayerNames, kn) { - currentKey = ffjtLayerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'p': - - if bytes.Equal(ffjKeyLayerParent, kn) { - currentKey = ffjtLayerParent - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) { - currentKey = ffjtLayerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) { - currentKey = ffjtLayerCompressionType - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) { - currentKey = ffjtLayerUncompressedSize - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) { - currentKey = ffjtLayerUncompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) { - currentKey = ffjtLayerCompressedSize - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) { - currentKey = ffjtLayerCompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) { - currentKey = ffjtLayerCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) { - currentKey = ffjtLayerMountLabel - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) { - currentKey = ffjtLayerMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - if 
fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) { - currentKey = ffjtLayerParent - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerNames, kn) { - currentKey = ffjtLayerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) { - currentKey = ffjtLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtLayernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtLayerID: - goto handle_ID - - case ffjtLayerNames: - goto handle_Names - - case ffjtLayerParent: - goto handle_Parent - - case ffjtLayerMetadata: - goto handle_Metadata - - case ffjtLayerMountLabel: - goto handle_MountLabel - - case ffjtLayerCreated: - goto handle_Created - - case ffjtLayerCompressedDigest: - goto handle_CompressedDigest - - case ffjtLayerCompressedSize: - goto handle_CompressedSize - - case ffjtLayerUncompressedDigest: - goto handle_UncompressedDigest - - case ffjtLayerUncompressedSize: - goto handle_UncompressedSize - - case ffjtLayerCompressionType: - goto handle_CompressionType - - case ffjtLayerFlags: - goto handle_Flags - - case ffjtLayernosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Names: - - /* handler: j.Names type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Names = nil - } else { - - j.Names = []string{} - - wantVal := true - - for { - - var tmpJNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJNames = string(string(outBuf)) - - } - } - - j.Names = append(j.Names, tmpJNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Parent: - - /* handler: j.Parent type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Parent = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Metadata: - - /* handler: j.Metadata type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Metadata = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_MountLabel: - - /* handler: j.MountLabel type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.MountLabel = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Created: - - /* handler: j.Created type=time.Time kind=struct quoted=false*/ - - { - if tok == fflib.FFTok_null { - - } else { - - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = j.Created.UnmarshalJSON(tbuf) - if err != nil { - return fs.WrapErr(err) - } - } - state = fflib.FFParse_after_value - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_CompressedDigest: - - /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.CompressedDigest = digest.Digest(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_CompressedSize: - - /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.CompressedSize = int64(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_UncompressedDigest: - - /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go 
value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.UncompressedDigest = digest.Digest(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_UncompressedSize: - - /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.UncompressedSize = int64(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_CompressionType: - - /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.CompressionType = archive.Compression(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Flags: - - /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Flags = nil - } else { - - j.Flags = make(map[string]interface{}, 0) - - wantVal := true - - for { - - var k string - - var tmpJFlags interface{} - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ - - { - /* Falling back. 
type=interface {} kind=interface */ - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = json.Unmarshal(tbuf, &tmpJFlags) - if err != nil { - return fs.WrapErr(err) - } - } - - j.Flags[k] = tmpJFlags - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *layerMountPoint) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{"id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteString(`,"path":`) - fflib.WriteJsonString(buf, string(j.MountPoint)) - buf.WriteString(`,"count":`) - fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0) - buf.WriteByte('}') - return nil -} - -const ( - ffjtlayerMountPointbase = iota - ffjtlayerMountPointnosuchkey - - ffjtlayerMountPointID - - ffjtlayerMountPointMountPoint - - ffjtlayerMountPointMountCount -) - -var ffjKeylayerMountPointID = []byte("id") - -var ffjKeylayerMountPointMountPoint = []byte("path") - -var ffjKeylayerMountPointMountCount = []byte("count") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *layerMountPoint) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtlayerMountPointbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
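layerMountPoint's key dispatch, like the other want_key states in this file, first buckets candidate keys by their initial byte for exact matches, then retries case-insensitively via fflib.SimpleLetterEqualFold and friends. A self-contained sketch of that two-stage match over the three keys above, using bytes.EqualFold as an approximation of the fflib fold helpers:

```go
package main

import (
	"bytes"
	"fmt"
)

var keyID, keyPath, keyCount = []byte("id"), []byte("path"), []byte("count")

func matchKey(kn []byte) string {
	if len(kn) == 0 {
		return "nosuchkey"
	}
	switch kn[0] { // cheap first-byte bucket before the full comparison
	case 'i':
		if bytes.Equal(kn, keyID) {
			return "id"
		}
	case 'p':
		if bytes.Equal(kn, keyPath) {
			return "path"
		}
	case 'c':
		if bytes.Equal(kn, keyCount) {
			return "count"
		}
	}
	// case-insensitive fallback, analogous to the fflib fold matchers
	for name, key := range map[string][]byte{"id": keyID, "path": keyPath, "count": keyCount} {
		if bytes.EqualFold(kn, key) {
			return name
		}
	}
	return "nosuchkey"
}

func main() {
	fmt.Println(matchKey([]byte("Path")), matchKey([]byte("bogus")))
}
```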
- currentKey = ffjtlayerMountPointnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'c': - - if bytes.Equal(ffjKeylayerMountPointMountCount, kn) { - currentKey = ffjtlayerMountPointMountCount - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeylayerMountPointID, kn) { - currentKey = ffjtlayerMountPointID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'p': - - if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) { - currentKey = ffjtlayerMountPointMountPoint - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) { - currentKey = ffjtlayerMountPointMountCount - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) { - currentKey = ffjtlayerMountPointMountPoint - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) { - currentKey = ffjtlayerMountPointID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtlayerMountPointnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtlayerMountPointID: - goto handle_ID - - case ffjtlayerMountPointMountPoint: - goto handle_MountPoint - - case ffjtlayerMountPointMountCount: - goto handle_MountCount - - case ffjtlayerMountPointnosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_MountPoint: - - /* handler: j.MountPoint type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.MountPoint = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_MountCount: - - /* handler: j.MountCount type=int kind=int quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.MountCount = int(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return 
fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *layerStore) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtlayerStorebase = iota - ffjtlayerStorenosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *layerStore) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtlayerStorebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtlayerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtlayerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtlayerStorenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *simpleGetCloser) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtsimpleGetCloserbase = iota - ffjtsimpleGetClosernosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *simpleGetCloser) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtsimpleGetCloserbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtsimpleGetClosernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtsimpleGetClosernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtsimpleGetClosernosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go index 41ee9017ad69..6e09b5264090 100644 --- a/vendor/github.com/containers/storage/lockfile.go +++ b/vendor/github.com/containers/storage/lockfile.go @@ -44,6 +44,8 @@ type lockfile struct { var ( lockfiles map[string]*lockfile lockfilesLock sync.Mutex + // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write + ErrLockReadOnly = errors.New("lock is not a read-write lock") ) // GetLockfile opens a read-write lock file, creating it if necessary. The diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md deleted file mode 100644 index 7307d9694f66..000000000000 --- a/vendor/github.com/containers/storage/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index abee36f7e433..a4071d71b535 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -6,6 +6,7 @@ import ( "bytes" "compress/bzip2" "compress/gzip" + "errors" "fmt" "io" "io/ioutil" @@ -26,11 +27,18 @@ import ( ) type ( + // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. + Archive io.ReadCloser + // Reader is a type of io.Reader. + Reader io.Reader // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int - + // TarChownOptions wraps the chown options UID and GID. + TarChownOptions struct { + UID, GID int + } // TarOptions wraps the tar options. TarOptions struct { IncludeFiles []string @@ -39,7 +47,7 @@ type ( NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap - ChownOpts *idtools.IDPair + ChownOpts *TarChownOptions IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. 
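Past the generated file, the revert also adds an ErrLockReadOnly sentinel to lockfile.go and rolls archive.go back to the older API surface: named Archive / Reader types over the io interfaces, and a TarChownOptions struct replacing idtools.IDPair in TarOptions.ChownOpts. A hedged sketch, with the reverted types re-declared locally so it compiles standalone, of how a caller populates the reverted field:

```go
package main

import "fmt"

// Local re-declarations of the reverted types so the sketch is self-contained.
type TarChownOptions struct{ UID, GID int }

type TarOptions struct {
	NoLchown  bool
	ChownOpts *TarChownOptions // was *idtools.IDPair before this revert
}

func main() {
	opts := &TarOptions{ChownOpts: &TarChownOptions{UID: 1000, GID: 1000}}
	fmt.Printf("extract files as %d:%d\n", opts.ChownOpts.UID, opts.ChownOpts.GID)
}
```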
// This format will be converted to the standard format on pack @@ -51,28 +59,34 @@ type ( // For each include when creating an archive, the included name will be // replaced with the matching name from this map. RebaseNames map[string]string - InUserNS bool } -) -// Archiver allows the reuse of most utility functions of this package -// with a pluggable Untar function. Also, to facilitate the passing of -// specific id mappings for untar, an archiver can be created with maps -// which will then be passed to Untar operations -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMappings *idtools.IDMappings -} + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. Also, to facilitate the passing of + // specific id mappings for untar, an archiver can be created with maps + // which will then be passed to Untar operations + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + } -// NewDefaultArchiver returns a new Archiver without any IDMappings -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} -} + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error +var ( + // ErrNotImplemented is the error message of function not implemented. + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} +) + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) const ( // Uncompressed represents the uncompressed. @@ -93,15 +107,17 @@ const ( OverlayWhiteoutFormat ) -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. @@ -131,7 +147,7 @@ func DetectCompression(source []byte) Compression { logrus.Debug("Len too short") continue } - if bytes.Equal(m, source[:len(m)]) { + if bytes.Compare(m, source[:len(m)]) == 0 { return compression } } @@ -190,7 +206,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } } -// CompressStream compresses the dest with specified compression algorithm. +// CompressStream compresseses the dest with specified compression algorithm. 
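The re-added IsArchive, and DetectCompression which it calls, work by comparing the stream's first bytes against known magic numbers; the revert also swaps bytes.Equal for the equivalent, older bytes.Compare(...) == 0 spelling. A minimal sketch of the sniffing idea using the standard magic values (the real table lives in the archive package):

```go
package main

import (
	"bytes"
	"fmt"
)

// sniff reports which supported compression the header bytes announce.
func sniff(header []byte) string {
	for name, magic := range map[string][]byte{
		"gzip":  {0x1F, 0x8B, 0x08},
		"bzip2": []byte("BZh"),
		"xz":    {0xFD, '7', 'z', 'X', 'Z', 0x00},
	} {
		if len(header) >= len(magic) && bytes.Equal(header[:len(magic)], magic) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	fmt.Println(sniff([]byte{0x1F, 0x8B, 0x08, 0x00})) // gzip
}
```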
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) @@ -204,100 +220,13 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars + // However, this is not a problem as we only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. -func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - header.Name = name - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - - }() - return pipeReader -} - // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { @@ -313,65 +242,8 @@ func (compression *Compression) Extension() string { return "" } -// FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. 
-// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. -func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return nil, err - } - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - return nil -} - type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertWrite(*tar.Header, string, os.FileInfo) error ConvertRead(*tar.Header, string) (bool, error) } @@ -380,9 +252,9 @@ type tarAppender struct { Buffer *bufio.Writer // for hardlink mapping - SeenFiles map[uint64]string - IDMappings *idtools.IDMappings - ChownOpts *idtools.IDPair + SeenFiles map[uint64]string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined @@ -391,16 +263,6 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } -func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IDMappings: idMapping, - ChownOpts: chownOpts, - } -} - // canonicalTarName provides a platform-independent and consistent posix-style //path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) (string, error) { @@ -423,30 +285,33 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } - var link string + link := "" if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { + if link, err = os.Readlink(path); err != nil { return err } } - hdr, err := FileInfoHeader(name, fi, link) + hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return err } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { return err } // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly + // it's hardlinked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { @@ -458,45 +323,35 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + //handle re-mapping container ID mappings back to host ID mappings before //writing tar headers/files. We skip whiteout files because they were written //by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { + uid, gid, err := getFileUIDGID(fi.Sys()) if err != nil { return err } - hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) if err != nil { return err } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) if err != nil { return err } + hdr.Uid = xUID + hdr.Gid = xGID + } - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo + if ta.WhiteoutConverter != nil { + if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil { + return err } } @@ -505,10 +360,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. 
- file, err := system.OpenSequential(path) + file, err := os.Open(path) if err != nil { return err } @@ -529,7 +381,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -546,10 +398,8 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } @@ -559,16 +409,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } file.Close() - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: // Handle this is an OS-specific way if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { return err @@ -603,13 +444,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return nil default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) } // Lchown is not supported on Windows. if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { - chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err @@ -684,7 +525,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + if err != nil { return nil, err } @@ -697,12 +539,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), + } defer func() { // Make sure to check the error on Close. 
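The TarWithOptions hunks here (and in the next hunk) revert from fileutils.NewPatternMatcher back to the older fileutils.CleanPatterns/OptimizedMatches helpers; the caller-facing exclude-pattern semantics are unchanged. A minimal caller-side sketch of that behavior, assuming only the exported TarWithOptions/TarOptions API of this vendored package (the source path, output path, and patterns are illustrative):

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// "tmp/*" excludes everything under tmp/; "!tmp/keep" is an exception
	// pattern that re-includes one path, which is why the walk cannot
	// blindly SkipDir on an excluded directory when exceptions exist.
	rdr, err := archive.TarWithOptions("/illustrative/src", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"tmp/*", "!tmp/keep"},
	})
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	out, err := os.Create("/illustrative/out.tar.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, rdr); err != nil {
		panic(err)
	}
}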
@@ -779,7 +623,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = pm.Matches(relFilePath) + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err @@ -789,7 +633,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this + // excludes pattern (eg !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. @@ -798,17 +642,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { + if !exceptions { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { + for _, pat := range patterns { + if pat[0] != '!' { continue } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + pat = pat[1:] + string(filepath.Separator) + if strings.HasPrefix(pat, dirSlash) { // found a match - so can't skip this dir return nil } @@ -858,8 +703,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) // Iterate through the files in the archive. @@ -893,7 +740,7 @@ loop: parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) if err != nil { return err } @@ -938,8 +785,26 @@ loop: } trBuf.Reset(tr) - if err := remapIDs(idMappings, hdr); err != nil { - return err + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. 
+ if hdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if hdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID } if whiteoutConverter != nil { @@ -952,7 +817,7 @@ loop: } } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { return err } @@ -1024,13 +889,23 @@ func (archiver *Archiver) TarUntar(src, dst string) error { return err } defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } } return archiver.Untar(archive, dst, options) } +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) @@ -1038,13 +913,22 @@ func (archiver *Archiver) UntarPath(src, dst string) error { return err } defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } } return archiver.Untar(archive, dst, options) } +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no @@ -1061,16 +945,27 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // if this archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner - rootIDs := archiver.IDMappings.RootPair() + rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. 
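The Unpack and TarUntar hunks above restore the slice-based ID-mapping helpers: uid/gid values are translated through []idtools.IDMap with idtools.ToHost when unpacking and idtools.ToContainer when packing. A small sketch of that translation, assuming this revision's idtools API (the map values are illustrative):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// One contiguous range: container uids 0..65535 -> host uids 100000..165535.
	uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// Unpack direction: a header owned by container uid 1000 is written
	// to disk as host uid 101000 (the value Lchown receives).
	hostUID, err := idtools.ToHost(1000, uidMaps)
	if err != nil {
		panic(err)
	}
	fmt.Println(hostUID) // 101000

	// Pack direction (addTarFile): the host uid is mapped back so the
	// archive records the container-relative owner.
	containerUID, err := idtools.ToContainer(101000, uidMaps)
	if err != nil {
		panic(err)
	}
	fmt.Println(containerUID) // 1000
}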
@@ -1091,7 +986,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { return err } @@ -1112,10 +1007,28 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - if err := remapIDs(archiver.IDMappings, hdr); err != nil { + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { return err } + // only perform mapping if the file being copied isn't already owned by the + // uid or gid of the remapped root in the container + if remappedRootUID != hdr.Uid { + xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if remappedRootGID != hdr.Gid { + xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { @@ -1127,7 +1040,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return nil }) defer func() { - if er := <-errC; err == nil && er != nil { + if er := <-errC; err != nil { err = er } }() @@ -1139,10 +1052,16 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return err } -func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { - ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// Destination handling is in an operating specific manner depending +// where the daemon is running. If `dst` ends with a trailing slash +// the final destination path will be `dst/base(src)` (Linux) or +// `dst\base(src)` (Windows). +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) } // cmdStream executes a command, and returns its stdout as a stream. @@ -1177,7 +1096,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. -func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { +func NewTempArchive(src Archive, dir string) (*TempArchive, error) { f, err := ioutil.TempFile(dir, "") if err != nil { return nil, err @@ -1226,26 +1145,3 @@ func (archive *TempArchive) Read(data []byte) (int, error) { } return n, err } - -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. 
-func UntarPath(src, dst string) error { - return NewDefaultArchiver().UntarPath(src, dst) -} - -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 5a14eb91a93a..e944ca2a753a 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -5,9 +5,9 @@ import ( "os" "path/filepath" "strings" + "syscall" "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" ) func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { @@ -19,7 +19,7 @@ func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { type overlayWhiteoutConverter struct{} -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error { // convert whiteouts to AUFS format if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { // we just rename the file and make it normal @@ -34,16 +34,12 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os // convert opaque dirs to AUFS format by writing an empty file with the prefix opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") if err != nil { - return nil, err + return err } - if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - + if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ + *hdr = tar.Header{ Typeflag: tar.TypeReg, Mode: hdr.Mode & int64(os.ModePerm), Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), @@ -58,7 +54,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os } } - return + return nil } func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { @@ -67,9 +63,12 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { + return false, err + } + // don't write the file itself - return false, err + return false, nil } // if a file was deleted and we are using overlay, we need to create a character device @@ -77,7 +76,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { return false, err } if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux_test.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux_test.go deleted file mode 100644 index 4d5441beea18..000000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package archive - -import ( - 
"io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" - - "github.com/containers/storage/pkg/system" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" -) - -// setupOverlayTestDir creates files in a directory with overlay whiteouts -// Tree layout -// . -// ├── d1 # opaque, 0700 -// │   └── f1 # empty file, 0600 -// ├── d2 # opaque, 0750 -// │   └── f1 # empty file, 0660 -// └── d3 # 0700 -// └── f1 # whiteout, 0644 -func setupOverlayTestDir(t *testing.T, src string) { - // Create opaque directory containing single file and permission 0700 - err := os.Mkdir(filepath.Join(src, "d1"), 0700) - require.NoError(t, err) - - err = system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0) - require.NoError(t, err) - - err = ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600) - require.NoError(t, err) - - // Create another opaque directory containing single file but with permission 0750 - err = os.Mkdir(filepath.Join(src, "d2"), 0750) - require.NoError(t, err) - - err = system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0) - require.NoError(t, err) - - err = ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660) - require.NoError(t, err) - - // Create regular directory with deleted file - err = os.Mkdir(filepath.Join(src, "d3"), 0700) - require.NoError(t, err) - - err = system.Mknod(filepath.Join(src, "d3", "f1"), unix.S_IFCHR, 0) - require.NoError(t, err) -} - -func checkOpaqueness(t *testing.T, path string, opaque string) { - xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - require.NoError(t, err) - - if string(xattrOpaque) != opaque { - t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) - } - -} - -func checkOverlayWhiteout(t *testing.T, path string) { - stat, err := os.Stat(path) - require.NoError(t, err) - - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", stat.Sys()) - } - if statT.Rdev != 0 { - t.Fatalf("Non-zero device number for whiteout") - } -} - -func checkFileMode(t *testing.T, path string, perm os.FileMode) { - stat, err := os.Stat(path) - require.NoError(t, err) - - if stat.Mode() != perm { - t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) - } -} - -func TestOverlayTarUntar(t *testing.T) { - oldmask, err := system.Umask(0) - require.NoError(t, err) - defer system.Umask(oldmask) - - src, err := ioutil.TempDir("", "storage-test-overlay-tar-src") - require.NoError(t, err) - defer os.RemoveAll(src) - - setupOverlayTestDir(t, src) - - dst, err := ioutil.TempDir("", "storage-test-overlay-tar-dst") - require.NoError(t, err) - defer os.RemoveAll(dst) - - options := &TarOptions{ - Compression: Uncompressed, - WhiteoutFormat: OverlayWhiteoutFormat, - } - archive, err := TarWithOptions(src, options) - require.NoError(t, err) - defer archive.Close() - - err = Untar(archive, dst, options) - require.NoError(t, err) - - checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) - checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) - checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) - - checkOpaqueness(t, filepath.Join(dst, "d1"), "y") - checkOpaqueness(t, filepath.Join(dst, "d2"), "y") - checkOpaqueness(t, filepath.Join(dst, "d3"), "") 
- checkOverlayWhiteout(t, filepath.Join(dst, "d3", "f1")) -} - -func TestOverlayTarAUFSUntar(t *testing.T) { - oldmask, err := system.Umask(0) - require.NoError(t, err) - defer system.Umask(oldmask) - - src, err := ioutil.TempDir("", "storage-test-overlay-tar-src") - require.NoError(t, err) - defer os.RemoveAll(src) - - setupOverlayTestDir(t, src) - - dst, err := ioutil.TempDir("", "storage-test-overlay-tar-dst") - require.NoError(t, err) - defer os.RemoveAll(dst) - - archive, err := TarWithOptions(src, &TarOptions{ - Compression: Uncompressed, - WhiteoutFormat: OverlayWhiteoutFormat, - }) - require.NoError(t, err) - defer archive.Close() - - err = Untar(archive, dst, &TarOptions{ - Compression: Uncompressed, - WhiteoutFormat: AUFSWhiteoutFormat, - }) - require.NoError(t, err) - - checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) - checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0750) - checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) - checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) - checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) - checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0600) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_test.go b/vendor/github.com/containers/storage/pkg/archive/archive_test.go index 74508c914b2c..85e41227c06e 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_test.go @@ -13,10 +13,6 @@ import ( "strings" "testing" "time" - - "github.com/containers/storage/pkg/idtools" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var tmp string @@ -28,22 +24,35 @@ func init() { } } -var defaultArchiver = NewDefaultArchiver() - -func defaultTarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } } -func defaultUntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) +func TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } } -func defaultCopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } } -func defaultCopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } } func TestIsArchivePathDir(t *testing.T) { @@ -58,7 +67,7 @@ func TestIsArchivePathDir(t *testing.T) { } func TestIsArchivePathInvalidFile(t *testing.T) { - cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1K count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := 
cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) @@ -72,14 +81,7 @@ func TestIsArchivePathInvalidFile(t *testing.T) { } func TestIsArchivePathTar(t *testing.T) { - var whichTar string - if runtime.GOOS == "solaris" { - whichTar = "gtar" - } else { - whichTar = "tar" - } - cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) - cmd := exec.Command("sh", "-c", cmdStr) + cmd := exec.Command("sh", "-c", "touch /tmp/archivedata && tar -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) @@ -92,54 +94,53 @@ func TestIsArchivePathTar(t *testing.T) { } } -func testDecompressStream(t *testing.T, ext, compressCommand string) { - cmd := exec.Command("sh", "-c", - fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) +func TestDecompressStreamGzip(t *testing.T) { + cmd := exec.Command("sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") output, err := cmd.CombinedOutput() if err != nil { - t.Fatalf("Failed to create an archive file for test : %s.", output) + t.Fatalf("Fail to create an archive file for test : %s.", output) } - filename := "archive." + ext - archive, err := os.Open(tmp + filename) + archive, err := os.Open(tmp + "archive.gz") + _, err = DecompressStream(archive) if err != nil { - t.Fatalf("Failed to open file %s: %v", filename, err) + t.Fatalf("Failed to decompress a gzip file.") } - defer archive.Close() +} - r, err := DecompressStream(archive) +func TestDecompressStreamBzip2(t *testing.T) { + cmd := exec.Command("sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") + output, err := cmd.CombinedOutput() if err != nil { - t.Fatalf("Failed to decompress %s: %v", filename, err) - } - if _, err = ioutil.ReadAll(r); err != nil { - t.Fatalf("Failed to read the decompressed stream: %v ", err) + t.Fatalf("Fail to create an archive file for test : %s.", output) } - if err = r.Close(); err != nil { - t.Fatalf("Failed to close the decompressed stream: %v ", err) + archive, err := os.Open(tmp + "archive.bz2") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a bzip2 file.") } } -func TestDecompressStreamGzip(t *testing.T) { - testDecompressStream(t, "gz", "gzip -f") -} - -func TestDecompressStreamBzip2(t *testing.T) { - testDecompressStream(t, "bz2", "bzip2 -f") -} - func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } - testDecompressStream(t, "xz", "xz -f") + cmd := exec.Command("sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open(tmp + "archive.xz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress an xz file.") + } } -func TestCompressStreamXzUnsupported(t *testing.T) { +func TestCompressStreamXzUnsuported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } - defer dest.Close() - _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") @@ -151,8 +152,6 @@ func TestCompressStreamBzip2Unsupported(t *testing.T) { if err != nil { t.Fatalf("Fail to create the 
destination file") } - defer dest.Close() - _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") @@ -164,8 +163,6 @@ func TestCompressStreamInvalid(t *testing.T) { if err != nil { t.Fatalf("Fail to create the destination file") } - defer dest.Close() - _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") @@ -263,8 +260,10 @@ func TestCmdStreamGood(t *testing.T) { } func TestUntarPathWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-archive-test") - require.NoError(t, err) + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file @@ -283,29 +282,33 @@ func TestUntarPathWithInvalidDest(t *testing.T) { cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } - err = defaultUntarPath(tarFile, invalidDestFolder) + err = UntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { - dest, err := ioutil.TempDir("", "storage-archive-test") + dest, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) - err = defaultUntarPath("/invalid/path", dest) + err = UntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "storage-archive-test") - require.NoError(t, err) + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") @@ -326,9 +329,11 @@ func TestUntarPath(t *testing.T) { } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } - err = defaultUntarPath(tarFile, destFolder) + err = UntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } @@ -341,7 +346,7 @@ func TestUntarPath(t *testing.T) { // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "storage-archive-test") + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } @@ -367,7 +372,7 @@ func TestUntarPathWithDestinationFile(t *testing.T) { if err != nil { t.Fatalf("Fail to create the destination file") } - err = defaultUntarPath(tarFile, destFile) + err = UntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } @@ -377,7 +382,7 @@ func TestUntarPathWithDestinationFile(t *testing.T) { // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "storage-archive-test") + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } @@ -410,14 +415,14 @@ func 
TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { if err != nil { t.Fatal(err) } - err = defaultUntarPath(tarFile, destFolder) + err = UntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-archive-test") + tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) } @@ -427,14 +432,14 @@ func TestCopyWithTarInvalidSrc(t *testing.T) { if err != nil { t.Fatal(err) } - err = defaultCopyWithTar(invalidSrc, destFolder) + err = CopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-archive-test") + tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) } @@ -444,7 +449,7 @@ func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { if err != nil { t.Fatal(err) } - err = defaultCopyWithTar(srcFolder, inexistentDestFolder) + err = CopyWithTar(srcFolder, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } @@ -456,7 +461,7 @@ func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "storage-archive-test") + folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } @@ -473,7 +478,7 @@ func TestCopyWithTarSrcFile(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = defaultCopyWithTar(src, dest) + err = CopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } @@ -486,7 +491,7 @@ func TestCopyWithTarSrcFile(t *testing.T) { // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "storage-archive-test") + folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } @@ -502,7 +507,7 @@ func TestCopyWithTarSrcFolder(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) - err = defaultCopyWithTar(src, dest) + err = CopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } @@ -514,7 +519,7 @@ func TestCopyWithTarSrcFolder(t *testing.T) { } func TestCopyFileWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-archive-test") + tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } @@ -525,14 +530,14 @@ func TestCopyFileWithTarInvalidSrc(t *testing.T) { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") - err = defaultCopyFileWithTar(invalidFile, destFolder) + err = CopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-archive-test") + tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) } @@ -543,7 +548,7 @@ func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { if err != nil { t.Fatal(err) } - err = defaultCopyFileWithTar(srcFile, 
inexistentDestFolder) + err = CopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } @@ -555,7 +560,7 @@ func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { } func TestCopyFileWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "storage-archive-copyfilewithtar-test") + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } @@ -570,14 +575,14 @@ func TestCopyFileWithTarSrcFolder(t *testing.T) { if err != nil { t.Fatal(err) } - err = defaultCopyFileWithTar(src, dest) + err = CopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "storage-archive-test") + folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } @@ -594,7 +599,7 @@ func TestCopyFileWithTarSrcFile(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = defaultCopyWithTar(src, dest+"/") + err = CopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } @@ -620,13 +625,13 @@ func TestTarFiles(t *testing.T) { } func checkNoChanges(fileNum int, hardlinks bool) error { - srcDir, err := ioutil.TempDir("", "storage-test-srcDir") + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) - destDir, err := ioutil.TempDir("", "storage-test-destDir") + destDir, err := ioutil.TempDir("", "docker-test-destDir") if err != nil { return err } @@ -637,7 +642,7 @@ func checkNoChanges(fileNum int, hardlinks bool) error { return err } - err = defaultTarUntar(srcDir, destDir) + err = TarUntar(srcDir, destDir) if err != nil { return err } @@ -671,7 +676,7 @@ func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } - tmp, err := ioutil.TempDir("", "storage-test-untar") + tmp, err := ioutil.TempDir("", "docker-test-untar") if err != nil { return nil, err } @@ -691,7 +696,7 @@ func TestTarUntar(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Failing on Windows") } - origin, err := ioutil.TempDir("", "storage-test-untar-origin") + origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } @@ -725,63 +730,12 @@ func TestTarUntar(t *testing.T) { } } -func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { - origin, err := ioutil.TempDir("", "storage-test-tar-chown-opt") - require.NoError(t, err) - - defer os.RemoveAll(origin) - filePath := filepath.Join(origin, "1") - err = ioutil.WriteFile(filePath, []byte("hello world"), 0700) - require.NoError(t, err) - - idMaps := []idtools.IDMap{ - 0: { - ContainerID: 0, - HostID: 0, - Size: 65536, - }, - 1: { - ContainerID: 0, - HostID: 100000, - Size: 65536, - }, - } - - cases := []struct { - opts *TarOptions - expectedUID int - expectedGID int - }{ - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1337, GID: 42}}, 1337, 42}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, - {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, - } - for _, testCase := range cases { - reader, err := TarWithOptions(filePath, testCase.opts) - require.NoError(t, err) - tr := tar.NewReader(reader) - defer reader.Close() - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - require.NoError(t, err) - assert.Equal(t, hdr.Uid, testCase.expectedUID, "Uid equals expected value") - assert.Equal(t, hdr.Gid, testCase.expectedGID, "Gid equals expected value") - } - } -} - func TestTarWithOptions(t *testing.T) { // TODO Windows: Figure out how to fix this test. if runtime.GOOS == "windows" { t.Skip("Failing on Windows") } - origin, err := ioutil.TempDir("", "storage-test-untar-origin") + origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } @@ -823,12 +777,12 @@ func TestTarWithOptions(t *testing.T) { // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - tmpDir, err := ioutil.TempDir("", "storage-test-archive-pax-test") + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) - err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) if err != nil { t.Fatal(err) } @@ -841,8 +795,6 @@ func TestUntarUstarGnuConflict(t *testing.T) { if err != nil { t.Fatal(err) } - defer f.Close() - found := false tr := tar.NewReader(f) // Iterate through the files in the archive. 
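These tests exercise the tar/untar round trip through the re-added package-level TarUntar convenience function. A self-contained sketch of that round trip, verified with the package's ChangesDirs helper (also used in the device-node test further below); temp-dir names and file contents are illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	src, err := ioutil.TempDir("", "roundtrip-src")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(src)
	dst, err := ioutil.TempDir("", "roundtrip-dst")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dst)

	if err := ioutil.WriteFile(filepath.Join(src, "f"), []byte("hello"), 0600); err != nil {
		panic(err)
	}

	// Tar src and unpack the stream into dst in one step.
	if err := archive.TarUntar(src, dst); err != nil {
		panic(err)
	}

	// A faithful round trip should produce no differences.
	changes, err := archive.ChangesDirs(src, dst)
	if err != nil {
		panic(err)
	}
	if len(changes) != 0 {
		panic(fmt.Sprintf("unexpected changes after round trip: %v", changes))
	}
}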
@@ -883,11 +835,11 @@ func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks } func BenchmarkTarUntar(b *testing.B) { - origin, err := ioutil.TempDir("", "storage-test-untar-origin") + origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } - tempDir, err := ioutil.TempDir("", "storage-test-untar-destination") + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } @@ -902,7 +854,7 @@ func BenchmarkTarUntar(b *testing.B) { b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { - err := defaultTarUntar(origin, target) + err := TarUntar(origin, target) if err != nil { b.Fatal(err) } @@ -911,11 +863,11 @@ func BenchmarkTarUntar(b *testing.B) { } func BenchmarkTarUntarWithLinks(b *testing.B) { - origin, err := ioutil.TempDir("", "storage-test-untar-origin") + origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } - tempDir, err := ioutil.TempDir("", "storage-test-untar-destination") + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } @@ -930,7 +882,7 @@ func BenchmarkTarUntarWithLinks(b *testing.B) { b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { - err := defaultTarUntar(origin, target) + err := TarUntar(origin, target) if err != nil { b.Fatal(err) } @@ -960,7 +912,7 @@ func TestUntarInvalidFilenames(t *testing.T) { }, }, } { - if err := testBreakout("untar", "storage-TestUntarInvalidFilenames", headers); err != nil { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } @@ -992,7 +944,7 @@ func TestUntarHardlinkToSymlink(t *testing.T) { }, }, } { - if err := testBreakout("untar", "storage-TestUntarHardlinkToSymlink", headers); err != nil { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } @@ -1076,7 +1028,7 @@ func TestUntarInvalidHardlink(t *testing.T) { }, }, } { - if err := testBreakout("untar", "storage-TestUntarInvalidHardlink", headers); err != nil { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } @@ -1174,7 +1126,7 @@ func TestUntarInvalidSymlink(t *testing.T) { }, }, } { - if err := testBreakout("untar", "storage-TestUntarInvalidSymlink", headers); err != nil { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } @@ -1194,132 +1146,3 @@ func TestTempArchiveCloseMultipleTimes(t *testing.T) { } } } - -func TestReplaceFileTarWrapper(t *testing.T) { - filesInArchive := 20 - testcases := []struct { - doc string - filename string - modifier TarModifierFunc - expected string - fileCount int - }{ - { - doc: "Modifier creates a new file", - filename: "newfile", - modifier: createModifier(t), - expected: "the new content", - fileCount: filesInArchive + 1, - }, - { - doc: "Modifier replaces a file", - filename: "file-2", - modifier: createOrReplaceModifier, - expected: "the new content", - fileCount: filesInArchive, - }, - { - doc: "Modifier replaces the last file", - filename: fmt.Sprintf("file-%d", filesInArchive-1), - modifier: createOrReplaceModifier, - expected: "the new content", - fileCount: filesInArchive, - }, - { - doc: "Modifier appends to a file", - filename: "file-3", - modifier: appendModifier, - expected: "fooo\nnext line", - fileCount: filesInArchive, - }, - } - - for _, testcase := range testcases { - sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) - defer cleanup() - - resultArchive := ReplaceFileTarWrapper( - sourceArchive, - map[string]TarModifierFunc{testcase.filename: testcase.modifier}) - - actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) - assert.Equal(t, testcase.expected, actual, testcase.doc) - } -} - -/* -// TestPrefixHeaderReadable tests that files that could be created with the -// version of this package that was built with <=go17 are still readable. -func TestPrefixHeaderReadable(t *testing.T) { - // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go - var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") - - tmpDir, err := ioutil.TempDir("", "prefix-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - err = Untar(bytes.NewReader(testFile), tmpDir, nil) - require.NoError(t, err) - - baseName := "foo" - pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName - - _, err = os.Lstat(filepath.Join(tmpDir, pth)) - require.NoError(t, err) -} -*/ - -func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { - srcDir, err := ioutil.TempDir("", "storage-test-srcDir") - require.NoError(t, err) - - _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) - require.NoError(t, err) - - sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) - require.NoError(t, err) - return sourceArchive, func() { - os.RemoveAll(srcDir) - sourceArchive.Close() - } -} - -func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - return &tar.Header{ - Mode: 0600, - Typeflag: tar.TypeReg, - }, []byte("the new content"), nil -} - -func createModifier(t *testing.T) TarModifierFunc { - return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - assert.Nil(t, content) - return createOrReplaceModifier(path, header, content) - } -} - -func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - buffer := bytes.Buffer{} - if content != nil { - if _, err := buffer.ReadFrom(content); err != nil { - return nil, nil, err - } - } - 
buffer.WriteString("\nnext line") - return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil -} - -func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { - destDir, err := ioutil.TempDir("", "storage-test-destDir") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - err = Untar(archive, destDir, nil) - require.NoError(t, err) - - files, _ := ioutil.ReadDir(destDir) - assert.Len(t, files, expectedCount, doc) - - content, err := ioutil.ReadFile(filepath.Join(destDir, name)) - assert.NoError(t, err) - return string(content) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index bdc1a3d79499..19d731fd2307 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -9,10 +9,7 @@ import ( "path/filepath" "syscall" - "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" ) // fixVolumePathPrefix does platform specific processing to ensure that if @@ -43,38 +40,33 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { s, ok := stat.(*syscall.Stat_t) - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert - } + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return } - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) + inode = uint64(s.Ino) - if ok { - inode = s.Ino + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) } return } -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { +func getFileUIDGID(stat interface{}) (int, int, error) { s, ok := stat.(*syscall.Stat_t) if !ok { - return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") } - return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil + return int(s.Uid), int(s.Gid), nil } func major(device uint64) uint64 { @@ -88,22 +80,20 @@ func minor(device uint64) uint64 { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: - mode |= unix.S_IFBLK + mode |= syscall.S_IFBLK case tar.TypeChar: - mode |= unix.S_IFCHR + mode |= syscall.S_IFCHR case tar.TypeFifo: - mode |= unix.S_IFIFO + mode |= syscall.S_IFIFO } - return system.Mknod(path, mode, 
int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix_test.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix_test.go index 095a90931760..4bf0ae2dfa18 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix_test.go @@ -8,14 +8,10 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" "syscall" "testing" "github.com/containers/storage/pkg/system" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" ) func TestCanonicalTarNameForPath(t *testing.T) { @@ -71,90 +67,61 @@ func TestChmodTarEntry(t *testing.T) { } func TestTarWithHardLink(t *testing.T) { - origin, err := ioutil.TempDir("", "storage-test-tar-hardlink") - require.NoError(t, err) + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(origin) - - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) - - err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) - require.NoError(t, err) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil { + t.Fatal(err) + } var i1, i2 uint64 - i1, err = getNlink(filepath.Join(origin, "1")) - require.NoError(t, err) - + if i1, err = getNlink(filepath.Join(origin, "1")); err != nil { + t.Fatal(err) + } // sanity check that we can hardlink if i1 != 2 { t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) } - dest, err := ioutil.TempDir("", "storage-test-tar-hardlink-dest") - require.NoError(t, err) + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - require.NoError(t, err) - - i1, err = getInode(filepath.Join(dest, "1")) - require.NoError(t, err) - - i2, err = getInode(filepath.Join(dest, "2")) - require.NoError(t, err) - - assert.Equal(t, i1, i2) -} - -func TestTarWithHardLinkAndRebase(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "storage-test-tar-hardlink-rebase") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - origin := filepath.Join(tmpDir, "origin") - err = os.Mkdir(origin, 0700) - require.NoError(t, err) - - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) - - err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) - require.NoError(t, err) - - var i1, i2 uint64 - i1, err = getNlink(filepath.Join(origin, "1")) - require.NoError(t, err) - - // sanity check that we can hardlink - if i1 != 2 { - t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + if err != nil 
{ + t.Fatal(err) } - dest := filepath.Join(tmpDir, "dest") - bRdr, err := TarResourceRebase(origin, "origin") - require.NoError(t, err) - - dstDir, srcBase := SplitPathDirEntry(origin) - _, dstBase := SplitPathDirEntry(dest) - content := RebaseArchiveEntries(bRdr, srcBase, dstBase) - err = Untar(content, dstDir, &TarOptions{Compression: Uncompressed, NoLchown: true, NoOverwriteDirNonDir: true}) - require.NoError(t, err) - - i1, err = getInode(filepath.Join(dest, "1")) - require.NoError(t, err) - i2, err = getInode(filepath.Join(dest, "2")) - require.NoError(t, err) + if i1, err = getInode(filepath.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(filepath.Join(dest, "2")); err != nil { + t.Fatal(err) + } - assert.Equal(t, i1, i2) + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } } func getNlink(path string) (uint64, error) { @@ -183,39 +150,52 @@ func getInode(path string) (uint64, error) { } func TestTarWithBlockCharFifo(t *testing.T) { - origin, err := ioutil.TempDir("", "storage-test-tar-hardlink") - require.NoError(t, err) - + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(origin) - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) - - err = system.Mknod(filepath.Join(origin, "2"), unix.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))) - require.NoError(t, err) - err = system.Mknod(filepath.Join(origin, "3"), unix.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))) - require.NoError(t, err) - err = system.Mknod(filepath.Join(origin, "4"), unix.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))) - require.NoError(t, err) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } - dest, err := ioutil.TempDir("", "storage-test-tar-hardlink-dest") - require.NoError(t, err) + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } changes, err := ChangesDirs(origin, dest) - require.NoError(t, err) - + if err != nil { + t.Fatal(err) + } if len(changes) > 0 { t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) } @@ -223,21 +203,23 @@ func TestTarWithBlockCharFifo(t *testing.T) { // TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows func TestTarUntarWithXattr(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip() + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) } - 
origin, err := ioutil.TempDir("", "storage-test-untar-origin") - require.NoError(t, err) defer os.RemoveAll(origin) - err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) - - err = ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700) - require.NoError(t, err) - err = system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0) - require.NoError(t, err) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } for _, c := range []Compression{ Uncompressed, diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index 0bcbb925d2bf..828d3b9d0b04 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -9,7 +9,6 @@ import ( "path/filepath" "strings" - "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/longpath" ) @@ -43,23 +42,15 @@ func CanonicalTarNameForPath(p string) (string, error) { // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { - //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) - permPart := perm & os.ModePerm - noPermPart := perm &^ os.ModePerm + perm &= 0755 // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 + perm |= 0111 - return noPermPart | permPart + return perm } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows return } @@ -73,7 +64,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { return nil } -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { +func getFileUIDGID(stat interface{}) (int, int, error) { // no notion of file ownership mapping yet on Windows - return idtools.IDPair{0, 0}, nil + return 0, 0, nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows_test.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows_test.go index 849d99c2d31b..0c6733d6bd10 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows_test.go @@ -14,7 +14,7 @@ func TestCopyFileWithInvalidDest(t *testing.T) { // recently changed in CopyWithTar as used to pass. Further investigation // is required. 
t.Skip("Currently fails") - folder, err := ioutil.TempDir("", "storage-archive-test") + folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } @@ -27,7 +27,7 @@ func TestCopyFileWithInvalidDest(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = defaultCopyWithTar(src, dest) + err = CopyWithTar(src, dest) if err == nil { t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") } @@ -82,8 +82,6 @@ func TestChmodTarEntry(t *testing.T) { {0644, 0755}, {0755, 0755}, {0444, 0555}, - {0755 | os.ModeDir, 0755 | os.ModeDir}, - {0755 | os.ModeSymlink, 0755 | os.ModeSymlink}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 6ba4b8ec6f56..488e12989a2f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -267,7 +267,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { } for name, newChild := range info.children { - oldChild := oldChildren[name] + oldChild, _ := oldChildren[name] if oldChild != nil { // change? oldStat := oldChild.stat @@ -279,7 +279,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // breaks down is if some code intentionally hides a change by setting // back mtime if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { + bytes.Compare(oldChild.capability, newChild.capability) != 0 { change := Change{ Path: newChild.path(), Kind: ChangeModify, @@ -391,11 +391,16 @@ func ChangesSize(newDir string, changes []Change) int64 { } // ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { reader, writer := io.Pipe() go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) - + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go index 90c9a627e5be..798d7bfccbad 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -10,7 +10,6 @@ import ( "unsafe" "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" ) // walker is used to implement collectFileInfoForChanges on linux. 
Where this @@ -66,7 +65,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { } parent := root.LookUp(filepath.Dir(path)) if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) } info := &FileInfo{ name: filepath.Base(path), @@ -234,7 +233,7 @@ func readdirnames(dirname string) (names []nameIno, err error) { // Refill the buffer if necessary if bufp >= nbuf { bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux if nbuf < 0 { nbuf = 0 } @@ -256,12 +255,12 @@ func readdirnames(dirname string) (names []nameIno, err error) { return sl, nil } -// parseDirent is a minor modification of unix.ParseDirent (linux version) +// parseDirent is a minor modification of syscall.ParseDirent (linux version) // which returns {name,inode} pairs instead of just names. func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { origlen := len(buf) for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. continue @@ -294,7 +293,7 @@ func OverlayChanges(layers []string, rw string) ([]Change, error) { func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { if fi.Mode()&os.ModeCharDevice != 0 { s := fi.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { return path, nil } } @@ -303,7 +302,7 @@ func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { if err != nil { return "", err } - if len(opaque) == 1 && opaque[0] == 'y' { + if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { return path, nil } } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_posix_test.go b/vendor/github.com/containers/storage/pkg/archive/changes_posix_test.go index 9d6defc565c6..5a3282b5a8ab 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_posix_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_posix_test.go @@ -7,25 +7,20 @@ import ( "io/ioutil" "os" "path" - "runtime" "sort" "testing" ) func TestHardLinkOrder(t *testing.T) { - //TODO Should run for Solaris - if runtime.GOOS == "solaris" { - t.Skip("gcp failures on Solaris") - } names := []string{"file1.txt", "file2.txt", "file3.txt"} msg := []byte("Hey y'all") // Create dir - src, err := ioutil.TempDir("", "storage-hardlink-test-src-") + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") if err != nil { t.Fatal(err) } - defer os.RemoveAll(src) + //defer os.RemoveAll(src) for _, name := range names { func() { fh, err := os.Create(path.Join(src, name)) @@ -39,7 +34,7 @@ func TestHardLinkOrder(t *testing.T) { }() } // Create dest, with changes that includes hardlinks - dest, err := ioutil.TempDir("", "storage-hardlink-test-dest-") + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_test.go b/vendor/github.com/containers/storage/pkg/archive/changes_test.go index c92b6ee38daa..f19c9fd14897 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_test.go @@ -11,7 +11,6 @@ import ( "time" 
"github.com/containers/storage/pkg/system" - "github.com/stretchr/testify/require" ) func max(x, y int) int { @@ -23,10 +22,6 @@ func max(x, y int) int { func copyDir(src, dst string) error { cmd := exec.Command("cp", "-a", src, dst) - if runtime.GOOS == "solaris" { - cmd = exec.Command("gcp", "-a", src, dst) - } - if err := cmd.Run(); err != nil { return err } @@ -80,29 +75,33 @@ func createSampleDir(t *testing.T, root string) { for _, info := range files { p := path.Join(root, info.path) if info.filetype == Dir { - err := os.MkdirAll(p, info.permissions) - require.NoError(t, err) + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } } else if info.filetype == Regular { - err := ioutil.WriteFile(p, []byte(info.contents), info.permissions) - require.NoError(t, err) + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } } else if info.filetype == Symlink { - err := os.Symlink(info.contents, p) - require.NoError(t, err) + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } } if info.filetype != Symlink { // Set a consistent ctime, atime for all files and dirs - err := system.Chtimes(p, now, now) - require.NoError(t, err) + if err := system.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } } } } func TestChangeString(t *testing.T) { - modifyChange := Change{"change", ChangeModify} - toString := modifyChange.String() + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() if toString != "C change" { - t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString) + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) } addChange := Change{"change", ChangeAdd} toString = addChange.String() @@ -122,15 +121,21 @@ func TestChangesWithNoChanges(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("symlinks on Windows") } - rwLayer, err := ioutil.TempDir("", "storage-changes-test") - require.NoError(t, err) + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(rwLayer) - layer, err := ioutil.TempDir("", "storage-changes-test-layer") - require.NoError(t, err) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(layer) createSampleDir(t, layer) changes, err := Changes([]string{layer}, rwLayer) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } if len(changes) != 0 { t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) } @@ -143,15 +148,19 @@ func TestChangesWithChanges(t *testing.T) { t.Skip("symlinks on Windows") } // Mock the readonly layer - layer, err := ioutil.TempDir("", "storage-changes-test-layer") - require.NoError(t, err) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(layer) createSampleDir(t, layer) os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) // Mock the RW layer - rwLayer, err := ioutil.TempDir("", "storage-changes-test") - require.NoError(t, err) + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(rwLayer) // Create a folder in RW layer @@ -168,7 +177,9 @@ func TestChangesWithChanges(t *testing.T) { ioutil.WriteFile(newFile, []byte{}, 0740) changes, err := Changes([]string{layer}, rwLayer) - require.NoError(t, err) + if err != nil 
{ + t.Fatal(err) + } expectedChanges := []Change{ {"/dir1", ChangeModify}, @@ -187,7 +198,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("symlinks on Windows") } - baseLayer, err := ioutil.TempDir("", "storage-changes-test.") + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") defer os.RemoveAll(baseLayer) dir3 := path.Join(baseLayer, "dir1/dir2/dir3") @@ -196,7 +207,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { file := path.Join(dir3, "file.txt") ioutil.WriteFile(file, []byte("hello"), 0666) - layer, err := ioutil.TempDir("", "storage-changes-test2.") + layer, err := ioutil.TempDir("", "docker-changes-test2.") defer os.RemoveAll(layer) // Test creating a new file @@ -209,7 +220,9 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("bye"), 0666) changes, err := Changes([]string{baseLayer}, layer) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } expectedChanges := []Change{ {"/dir1/dir2/dir3", ChangeModify}, @@ -218,7 +231,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { checkChanges(expectedChanges, changes, t) // Now test changing a file - layer, err = ioutil.TempDir("", "storage-changes-test3.") + layer, err = ioutil.TempDir("", "docker-changes-test3.") defer os.RemoveAll(layer) if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { @@ -229,7 +242,9 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("bye"), 0666) changes, err = Changes([]string{baseLayer}, layer) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } expectedChanges = []Change{ {"/dir1/dir2/dir3/file.txt", ChangeModify}, @@ -241,20 +256,24 @@ func TestChangesWithChangesGH13590(t *testing.T) { func TestChangesDirsEmpty(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now // as createSampleDir uses symlinks. 
- // TODO Should work for Solaris - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("symlinks on Windows; gcp failure on Solaris") + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) } - src, err := ioutil.TempDir("", "storage-changes-test") - require.NoError(t, err) defer os.RemoveAll(src) createSampleDir(t, src) dst := src + "-copy" - err = copyDir(src, dst) - require.NoError(t, err) + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } if len(changes) != 0 { t.Fatalf("Reported changes for identical dirs: %v", changes) @@ -265,87 +284,107 @@ func TestChangesDirsEmpty(t *testing.T) { func mutateSampleDir(t *testing.T, root string) { // Remove a regular file - err := os.RemoveAll(path.Join(root, "file1")) - require.NoError(t, err) + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } // Remove a directory - err = os.RemoveAll(path.Join(root, "dir1")) - require.NoError(t, err) + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } // Remove a symlink - err = os.RemoveAll(path.Join(root, "symlink1")) - require.NoError(t, err) + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } // Rewrite a file - err = ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777) - require.NoError(t, err) + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } // Replace a file - err = os.RemoveAll(path.Join(root, "file3")) - require.NoError(t, err) - err = ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404) - require.NoError(t, err) + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } // Touch file - err = system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)) - require.NoError(t, err) + if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } // Replace file with dir - err = os.RemoveAll(path.Join(root, "file5")) - require.NoError(t, err) - err = os.MkdirAll(path.Join(root, "file5"), 0666) - require.NoError(t, err) + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } // Create new file - err = ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777) - require.NoError(t, err) + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } // Create new dir - err = os.MkdirAll(path.Join(root, "dirnew"), 0766) - require.NoError(t, err) + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } // Create a new symlink - err = os.Symlink("targetnew", path.Join(root, "symlinknew")) - require.NoError(t, err) + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } // Change a symlink - err = os.RemoveAll(path.Join(root, "symlink2")) - require.NoError(t, err) - - err = os.Symlink("target2change", path.Join(root, "symlink2")) - require.NoError(t, err) + if err 
:= os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } // Replace dir with file - err = os.RemoveAll(path.Join(root, "dir2")) - require.NoError(t, err) - err = ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777) - require.NoError(t, err) + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } // Touch dir - err = system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)) - require.NoError(t, err) + if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } } func TestChangesDirsMutated(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now // as createSampleDir uses symlinks. - // TODO Should work for Solaris - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("symlinks on Windows; gcp failures on Solaris") + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) } - src, err := ioutil.TempDir("", "storage-changes-test") - require.NoError(t, err) createSampleDir(t, src) dst := src + "-copy" - err = copyDir(src, dst) - require.NoError(t, err) + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } defer os.RemoveAll(src) defer os.RemoveAll(dst) mutateSampleDir(t, dst) changes, err := ChangesDirs(dst, src) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } sort.Sort(changesByPath(changes)) @@ -386,34 +425,45 @@ func TestChangesDirsMutated(t *testing.T) { func TestApplyLayer(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now // as createSampleDir uses symlinks. 
- // TODO Should work for Solaris - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("symlinks on Windows; gcp failures on Solaris") + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) } - src, err := ioutil.TempDir("", "storage-changes-test") - require.NoError(t, err) createSampleDir(t, src) defer os.RemoveAll(src) dst := src + "-copy" - err = copyDir(src, dst) - require.NoError(t, err) + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } mutateSampleDir(t, dst) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } layer, err := ExportChanges(dst, changes, nil, nil) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } layerCopy, err := NewTempArchive(layer, "") - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } - _, err = ApplyLayer(src, layerCopy) - require.NoError(t, err) + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } changes2, err := ChangesDirs(src, dst) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } if len(changes2) != 0 { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) @@ -426,19 +476,27 @@ func TestChangesSizeWithHardlinks(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("hardlinks on Windows") } - srcDir, err := ioutil.TempDir("", "storage-test-srcDir") - require.NoError(t, err) + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(srcDir) - destDir, err := ioutil.TempDir("", "storage-test-destDir") - require.NoError(t, err) + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + t.Fatal(err) + } defer os.RemoveAll(destDir) creationSize, err := prepareUntarSourceDirectory(100, destDir, true) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } changes, err := ChangesDirs(destDir, srcDir) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } got := ChangesSize(destDir, changes) if got != int64(creationSize) { @@ -464,15 +522,16 @@ func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { } func TestChangesSize(t *testing.T) { - parentPath, err := ioutil.TempDir("", "storage-changes-test") + parentPath, err := ioutil.TempDir("", "docker-changes-test") defer os.RemoveAll(parentPath) addition := path.Join(parentPath, "addition") - err = ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744) - require.NoError(t, err) + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } modification := path.Join(parentPath, "modification") - err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744) - require.NoError(t, err) - + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } changes := []Change{ {Path: "addition", Kind: ChangeAdd}, {Path: "modification", Kind: ChangeModify}, diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go index d669c01b46b3..43dd94e2d721 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -7,7 +7,6 @@ import ( "syscall" "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" ) func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { @@ -17,7 
+16,7 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { oldStat.GID() != newStat.GID() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { return true } @@ -25,11 +24,11 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 } func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino + return uint64(fi.Sys().(*syscall.Stat_t).Ino) } func hasHardlinks(fi os.FileInfo) bool { diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go index 5ad3d7e38da1..06eadd662bc1 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -9,16 +9,16 @@ import ( func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { // Don't look at size for dirs, its not a good measure of change - if oldStat.Mtim() != newStat.Mtim() || + if oldStat.ModTime() != newStat.ModTime() || oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + oldStat.Size() != newStat.Size() && !oldStat.IsDir() { return true } return false } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() + return info.parent == nil || info.stat.IsDir() } func getIno(fi os.FileInfo) (inode uint64) { diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index ea012b2d99d3..c970f422aa0d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -88,13 +88,13 @@ func SplitPathDirEntry(path string) (dir, base string) { // This function acts as a convenient wrapper around TarWithOptions, which // requires a directory as the source path. TarResource accepts either a // directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { +func TarResource(sourceInfo CopyInfo) (content Archive, err error) { return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) } // TarResourceRebase is like TarResource but renames the first path element of // items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { +func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { sourcePath = normalizePath(sourcePath) if _, err = os.Lstat(sourcePath); err != nil { // Catches the case where the source does not exist or is not a @@ -103,7 +103,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er return } - // Separate the source path between its directory and + // Separate the source path between it's directory and // the entry in that directory which we are archiving. 
sourceDir, sourceBase := SplitPathDirEntry(sourcePath) @@ -241,7 +241,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { // contain the archived resource described by srcInfo, to the destination // described by dstInfo. Returns the possibly modified content archive along // with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { +func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { // Ensure in platform semantics srcInfo.Path = normalizePath(srcInfo.Path) dstInfo.Path = normalizePath(dstInfo.Path) @@ -304,7 +304,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir // RebaseArchiveEntries rewrites the given srcContent archive replacing // an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { +func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { if oldBase == string(os.PathSeparator) { // If oldBase specifies the root directory, use an empty string as // oldBase instead so that newBase doesn't replace the path separator @@ -332,9 +332,6 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read } hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } if err = rebasedTar.WriteHeader(hdr); err != nil { w.CloseWithError(err) @@ -383,7 +380,7 @@ func CopyResource(srcPath, dstPath string, followLink bool) error { // CopyTo handles extracting the given content whose // entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { +func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { // The destination path need not exist, but CopyInfoDestinationPath will // ensure that at least the parent directory exists. dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix_test.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix_test.go index e08bcb491650..ecbfc172b01b 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy_unix_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix_test.go @@ -1,6 +1,6 @@ // +build !windows -// TODO Windows: Some of these tests may be salvageable and portable to Windows. +// TODO Windows: Some of these tests may be salvagable and portable to Windows. 
package archive @@ -15,8 +15,6 @@ import ( "path/filepath" "strings" "testing" - - "github.com/stretchr/testify/require" ) func removeAllPaths(paths ...string) { @@ -28,11 +26,13 @@ func removeAllPaths(paths ...string) { func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { var err error - tmpDirA, err = ioutil.TempDir("", "archive-copy-test") - require.NoError(t, err) + if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } - tmpDirB, err = ioutil.TempDir("", "archive-copy-test") - require.NoError(t, err) + if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { + t.Fatal(err) + } return } @@ -118,8 +118,9 @@ func logDirContents(t *testing.T, dirPath string) { t.Logf("logging directory contents: %q", dirPath) - err := filepath.Walk(dirPath, logWalkedPaths) - require.NoError(t, err) + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } } func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { @@ -292,8 +293,9 @@ func TestCopyCaseA(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } os.Remove(dstPath) symlinkPath := filepath.Join(tmpDirA, "symlink3") @@ -304,15 +306,17 @@ func TestCopyCaseA(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } os.Remove(dstPath) if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } } // B. SRC specifies a file and DST (with trailing path separator) doesn't @@ -373,8 +377,9 @@ func TestCopyCaseC(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } } // C. Symbol link following version: @@ -410,8 +415,9 @@ func TestCopyCaseCFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } } // D. SRC specifies a file and DST exists as a directory. This should place @@ -440,8 +446,9 @@ func TestCopyCaseD(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } // Now try again but using a trailing path separator for dstDir. @@ -459,8 +466,9 @@ func TestCopyCaseD(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } } // D. 
Symbol link following version: @@ -491,8 +499,9 @@ func TestCopyCaseDFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } // Now try again but using a trailing path separator for dstDir. @@ -510,8 +519,9 @@ func TestCopyCaseDFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } } // E. SRC specifies a directory and DST does not exist. This should create a @@ -553,8 +563,9 @@ func TestCopyCaseE(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, dstDir, srcDir) - require.NoError(t, err) + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } } // E. Symbol link following version: @@ -598,8 +609,9 @@ func TestCopyCaseEFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, dstDir, linkTarget) - require.NoError(t, err) + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } } // F. SRC specifies a directory and DST exists as a file. This should cause an @@ -657,8 +669,9 @@ func TestCopyCaseG(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, resultDir, srcDir) - require.NoError(t, err) + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } // Now try again but using a trailing path separator for dstDir. @@ -676,8 +689,9 @@ func TestCopyCaseG(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, resultDir, srcDir) - require.NoError(t, err) + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } } // G. Symbol link version: @@ -703,8 +717,9 @@ func TestCopyCaseGFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, resultDir, linkTarget) - require.NoError(t, err) + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } // Now try again but using a trailing path separator for dstDir. @@ -722,8 +737,9 @@ func TestCopyCaseGFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, resultDir, linkTarget) - require.NoError(t, err) + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } } // H. SRC specifies a directory's contents only and DST does not exist. This @@ -883,8 +899,9 @@ func TestCopyCaseJ(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, dstDir, srcDir) - require.NoError(t, err) + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } // Now try again but using a trailing path separator for dstDir. @@ -902,8 +919,9 @@ func TestCopyCaseJ(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, dstDir, srcDir) - require.NoError(t, err) + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } } // J. 
Symbol link following version: @@ -934,8 +952,9 @@ func TestCopyCaseJFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, dstDir, linkTarget) - require.NoError(t, err) + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } // Now try again but using a trailing path separator for dstDir. @@ -953,6 +972,7 @@ func TestCopyCaseJFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - err = dirContentsEqual(t, dstDir, linkTarget) - require.NoError(t, err) + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } } diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index f93f4cb175a6..c7ad4d940767 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -19,7 +19,7 @@ import ( // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { +func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) @@ -33,11 +33,17 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) + if options == nil { + options = &TarOptions{} + } // Iterate through the files in the archive. for { hdr, err := tr.Next() @@ -84,7 +90,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600, "") + err = system.MkdirAll(parentPath, 0600) if err != nil { return 0, err } @@ -105,7 +111,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { return 0, err } } @@ -192,11 +198,28 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - if err := remapIDs(idMappings, srcHdr); err != nil { - return 0, err + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. 
+ if srcHdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) + if err != nil { + return 0, err + } + srcHdr.Uid = xUID } - - if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + if srcHdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) + if err != nil { + return 0, err + } + srcHdr.Gid = xGID + } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { return 0, err } @@ -223,7 +246,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // and applies it to the directory `dest`. The stream `layer` can be // compressed or uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { +func ApplyLayer(dest string, layer Reader) (int64, error) { return applyLayerHandler(dest, layer, &TarOptions{}, true) } @@ -231,12 +254,12 @@ func ApplyLayer(dest string, layer io.Reader) (int64, error) { // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { +func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } // do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { +func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { dest = filepath.Clean(dest) // We need to be able to set any perms diff --git a/vendor/github.com/containers/storage/pkg/archive/diff_test.go b/vendor/github.com/containers/storage/pkg/archive/diff_test.go index d3fec31ed938..b6f654b76a3e 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff_test.go @@ -35,7 +35,7 @@ func TestApplyLayerInvalidFilenames(t *testing.T) { }, }, } { - if err := testBreakout("applylayer", "storage-TestApplyLayerInvalidFilenames", headers); err != nil { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } @@ -118,7 +118,7 @@ func TestApplyLayerInvalidHardlink(t *testing.T) { }, }, } { - if err := testBreakout("applylayer", "storage-TestApplyLayerInvalidHardlink", headers); err != nil { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } @@ -201,7 +201,7 @@ func TestApplyLayerInvalidSymlink(t *testing.T) { }, }, } { - if err := testBreakout("applylayer", "storage-TestApplyLayerInvalidSymlink", headers); err != nil { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } diff --git a/vendor/github.com/containers/storage/pkg/archive/utils_test.go b/vendor/github.com/containers/storage/pkg/archive/utils_test.go index 01b9e92d1cdd..98719032f34f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/utils_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/utils_test.go @@ -16,7 +16,7 @@ var testUntarFns = map[string]func(string, io.Reader) error{ return Untar(r, dest, nil) }, "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, r) + _, err := ApplyLayer(dest, Reader(r)) return err }, } diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go index b39d12c87800..dfb335c0b6c0 100644 --- a/vendor/github.com/containers/storage/pkg/archive/wrap.go +++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go @@ -3,7 +3,7 @@ package archive import ( "archive/tar" "bytes" - "io" + "io/ioutil" ) // Generate generates a new archive from the content provided @@ -22,7 +22,7 @@ import ( // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { +func Generate(input ...string) (Archive, error) { files := parseStringPairs(input...) buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -42,7 +42,7 @@ func Generate(input ...string) (io.Reader, error) { if err := tw.Close(); err != nil { return nil, err } - return buf, nil + return ioutil.NopCloser(buf), nil } func parseStringPairs(input ...string) (output [][2]string) { diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap_test.go b/vendor/github.com/containers/storage/pkg/archive/wrap_test.go index bd26bda3a2ae..46ab36697a75 100644 --- a/vendor/github.com/containers/storage/pkg/archive/wrap_test.go +++ b/vendor/github.com/containers/storage/pkg/archive/wrap_test.go @@ -5,13 +5,13 @@ import ( "bytes" "io" "testing" - - "github.com/stretchr/testify/require" ) func TestGenerateEmptyFile(t *testing.T) { archive, err := Generate("emptyFile") - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } if archive == nil { t.Fatal("The generated archive should not be nil.") } @@ -28,7 +28,9 @@ func TestGenerateEmptyFile(t *testing.T) { if err == io.EOF { break } - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() @@ -52,7 +54,9 @@ func TestGenerateEmptyFile(t *testing.T) { func TestGenerateWithContent(t *testing.T) { archive, err := Generate("file", "content") - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } if archive == nil { t.Fatal("The generated archive should not be nil.") } @@ -69,7 +73,9 @@ func TestGenerateWithContent(t *testing.T) { if err == io.EOF { break } - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index 2735f1400181..649575c00663 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -11,13 +11,7 @@ import ( "github.com/containers/storage/pkg/idtools" ) -// NewArchiver returns a new Archiver which uses chrootarchive.Untar -func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { - if idMappings == nil { - idMappings = &idtools.IDMappings{} - } 
- return &archive.Archiver{Untar: Untar, IDMappings: idMappings} -} +var chrootArchiver = &archive.Archiver{Untar: Untar} // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. @@ -36,6 +30,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + if tarArchive == nil { return fmt.Errorf("Empty archive") } @@ -46,12 +41,14 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions options.ExcludePatterns = []string{} } - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { return err } } @@ -68,3 +65,33 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions return invokeUnpack(r, dest, options) } + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
+func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_test.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_test.go index cb3253d9f9bb..4a9b0115db30 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_test.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_test.go @@ -22,32 +22,14 @@ func init() { reexec.Init() } -var chrootArchiver = NewArchiver(nil) - -func TarUntar(src, dst string) error { - return chrootArchiver.TarUntar(src, dst) -} - -func CopyFileWithTar(src, dst string) (err error) { - return chrootArchiver.CopyFileWithTar(src, dst) -} - -func UntarPath(src, dst string) error { - return chrootArchiver.UntarPath(src, dst) -} - -func CopyWithTar(src, dst string) error { - return chrootArchiver.CopyWithTar(src, dst) -} - func TestChrootTarUntar(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "storage-TestChrootTarUntar") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700, ""); err != nil { + if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { @@ -61,7 +43,7 @@ func TestChrootTarUntar(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(dest, 0700, ""); err != nil { + if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { @@ -72,13 +54,13 @@ func TestChrootTarUntar(t *testing.T) { // gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of // local images) func TestChrootUntarWithHugeExcludesList(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "storage-TestChrootUntarHugeExcludes") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700, ""); err != nil { + if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { @@ -89,13 +71,13 @@ func TestChrootUntarWithHugeExcludesList(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700, ""); err != nil { + if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } options := &archive.TarOptions{} //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow //on most systems when passed via environment or command line arguments - excludes := make([]string, 65534) + excludes := make([]string, 65534, 65534) for i := 0; i < 65534; i++ { excludes[i] = strings.Repeat(string(i), 64) } @@ -106,7 +88,7 @@ func TestChrootUntarWithHugeExcludesList(t *testing.T) { } func TestChrootUntarEmptyArchive(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "storage-TestChrootUntarEmptyArchive") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") if err != nil { t.Fatal(err) } @@ -174,16 +156,16 @@ func TestChrootTarUntarWithSymlink(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Failing on Windows") } - tmpdir, err := ioutil.TempDir("", "storage-TestChrootTarUntarWithSymlink") + 
tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700, ""); err != nil { + if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } - if _, err := prepareSourceDirectory(10, src, false); err != nil { + if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") @@ -197,16 +179,16 @@ func TestChrootTarUntarWithSymlink(t *testing.T) { func TestChrootCopyWithTar(t *testing.T) { // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("Failing on Windows and Solaris") + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") } - tmpdir, err := ioutil.TempDir("", "storage-TestChrootCopyWithTar") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700, ""); err != nil { + if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { @@ -246,13 +228,13 @@ func TestChrootCopyWithTar(t *testing.T) { } func TestChrootCopyFileWithTar(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "storage-TestChrootCopyFileWithTar") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700, ""); err != nil { + if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { @@ -293,16 +275,16 @@ func TestChrootUntarPath(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Failing on Windows") } - tmpdir, err := ioutil.TempDir("", "storage-TestChrootUntarPath") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700, ""); err != nil { + if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } - if _, err := prepareSourceDirectory(10, src, false); err != nil { + if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") @@ -354,13 +336,13 @@ func (s *slowEmptyTarReader) Read(p []byte) (int, error) { } func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "storage-TestChrootUntarEmptyArchiveFromSlowReader") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700, ""); err != nil { + if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} @@ -370,13 +352,13 @@ func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { } func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "storage-TestChrootApplyEmptyArchiveFromSlowReader") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700, ""); err != nil { + if err := 
system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} @@ -386,13 +368,13 @@ func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { } func TestChrootApplyDotDotFile(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "storage-TestChrootApplyDotDotFile") + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700, ""); err != nil { + if err := system.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { @@ -403,7 +385,7 @@ func TestChrootApplyDotDotFile(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700, ""); err != nil { + if err := system.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } if _, err := ApplyLayer(dest, stream); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index e8bd22e36b3c..54b5ff4899fe 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -5,10 +5,9 @@ import ( "io/ioutil" "os" "path/filepath" + "syscall" "github.com/containers/storage/pkg/mount" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" ) // chroot on linux uses pivot_root instead of chroot @@ -18,25 +17,14 @@ import ( // Old root is removed after the call to pivot_root so it is no longer available under the new root. // This is similar to how libcontainer sets up a container's rootfs func chroot(path string) (err error) { - // if the engine is running in a user namespace we need to use actual chroot - if rsystem.RunningInUserNS() { - return realChroot(path) - } - if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { return fmt.Errorf("Error creating mount namespace before pivot: %v", err) } - // make everything in new ns private - if err := mount.MakeRPrivate("/"); err != nil { + if err := mount.MakeRPrivate(path); err != nil { return err } - if mounted, _ := mount.Mounted(path); !mounted { - if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { - return realChroot(path) - } - } - // setup oldRoot for pivot_root pivotDir, err := ioutil.TempDir(path, ".pivot_root") if err != nil { @@ -47,7 +35,7 @@ func chroot(path string) (err error) { defer func() { if mounted { // make sure pivotDir is not mounted before we try to remove it - if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { if err == nil { err = errCleanup } @@ -64,9 +52,16 @@ func chroot(path string) (err error) { err = errCleanup } } + + if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = fmt.Errorf("error unmounting root: %v", errCleanup) + } + return + } }() - if err := unix.PivotRoot(path, pivotDir); err != nil { + if err := syscall.PivotRoot(path, pivotDir); err != nil { // If pivot fails, fall back to the normal chroot after cleaning up temp dir if err := os.Remove(pivotDir); err != nil { return fmt.Errorf("Error cleaning up after failed pivot: %v", err) @@ -79,17 +74,17 @@ func chroot(path 
string) (err error) { // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - if err := unix.Chdir("/"); err != nil { + if err := syscall.Chdir("/"); err != nil { return fmt.Errorf("Error changing to new root: %v", err) } // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host - if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { return fmt.Errorf("Error making old root private after pivot: %v", err) } // Now unmount the old root so it's no longer visible from the new root - if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { return fmt.Errorf("Error while unmounting old root after pivot: %v", err) } mounted = false @@ -98,10 +93,10 @@ func chroot(path string) (err error) { } func realChroot(path string) error { - if err := unix.Chroot(path); err != nil { + if err := syscall.Chroot(path); err != nil { return fmt.Errorf("Error after fallback to chroot: %v", err) } - if err := unix.Chdir("/"); err != nil { + if err := syscall.Chdir("/"); err != nil { return fmt.Errorf("Error changing to new root after chroot: %v", err) } return nil diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go index f9b5dece8c97..16354bf64877 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -2,11 +2,11 @@ package chrootarchive -import "golang.org/x/sys/unix" +import "syscall" func chroot(path string) error { - if err := unix.Chroot(path); err != nil { + if err := syscall.Chroot(path); err != nil { return err } - return unix.Chdir("/") + return syscall.Chdir("/") } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go index 68b8f74f7755..377aeb9f0ad4 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go @@ -1,16 +1,12 @@ package chrootarchive -import ( - "io" - - "github.com/containers/storage/pkg/archive" -) +import "github.com/containers/storage/pkg/archive" // ApplyLayer parses a diff in the standard layer format from `layer`, // and applies it to the directory `dest`. The stream `layer` can only be // uncompressed. // Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { +func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) } @@ -18,6 +14,6 @@ func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { // `layer`, and applies it to the directory `dest`. The stream `layer` // can only be uncompressed. // Returns the size in bytes of the contents of the layer. 
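The hunks above revert chrootarchive to syscall-based chroot/pivot_root, but the exported behavior is unchanged: extraction always runs chrooted to the destination in a re-executed child, so a hostile archive's symlinks cannot escape the target tree. A minimal caller sketch, assuming only the package-level UntarPath wrapper visible earlier in this diff (paths are illustrative):

package main

import (
	"log"

	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	// reexec.Init returns true in the re-executed child that performs
	// the chrooted extraction, so callers must register it first, as the
	// test file's init() above does.
	if reexec.Init() {
		return
	}
	// Unpack a tar file into dest; the unpacking process is chrooted to dest.
	if err := chrootarchive.UntarPath("/tmp/layer.tar", "/var/lib/dest"); err != nil {
		log.Fatal(err)
	}
}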
-func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { +func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) { return applyLayerHandler(dest, layer, options, false) } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index 4369f30c9986..3a9f9a822bc7 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -7,7 +7,6 @@ import ( "encoding/json" "flag" "fmt" - "io" "io/ioutil" "os" "path/filepath" @@ -16,7 +15,6 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" ) type applyLayerResponse struct { @@ -29,14 +27,13 @@ type applyLayerResponse struct { func applyLayer() { var ( - tmpDir string + tmpDir = "" err error options *archive.TarOptions ) runtime.LockOSThread() flag.Parse() - inUserns := rsystem.RunningInUserNS() if err := chroot(flag.Arg(0)); err != nil { fatal(err) } @@ -52,10 +49,6 @@ func applyLayer() { fatal(err) } - if inUserns { - options.InUserNS = true - } - if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { fatal(err) } @@ -82,7 +75,7 @@ func applyLayer() { // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. -func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { +func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) if decompress { decompressed, err := archive.DecompressStream(layer) @@ -95,9 +88,6 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions } if options == nil { options = &archive.TarOptions{} - if rsystem.RunningInUserNS() { - options.InUserNS = true - } } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go index 8f8e88bfbeab..534d2708aaec 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -2,7 +2,6 @@ package chrootarchive import ( "fmt" - "io" "io/ioutil" "os" "path/filepath" @@ -14,7 +13,7 @@ import ( // applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. 
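ApplyLayer and ApplyUncompressedLayer differ only in whether applyLayerHandler decompresses the stream before handing it to the chrooted child; both report the unpacked size. A hedged usage sketch (the layer path is illustrative, and any io.Reader satisfies the archive.Reader parameter type used after this change):

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	if reexec.Init() {
		return // we are the re-executed, chrooted applyLayer child
	}
	layer, err := os.Open("/tmp/layer.tar") // layer diff stream (illustrative)
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()
	// size is the number of bytes of unpacked layer contents.
	size, err := chrootarchive.ApplyLayer("/var/lib/dest", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied layer: %d bytes", size)
}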
-func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { +func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) // Ensure it is a Windows-style volume path @@ -38,7 +37,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions s, err := archive.UnpackLayer(dest, layer, nil) os.RemoveAll(tmpDir) if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) } return s, nil diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go index 6a0ac2464795..1ed0e861fc2f 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -1,4 +1,4 @@ -// +build linux,cgo +// +build linux package devicemapper @@ -7,14 +7,17 @@ import ( "fmt" "os" "runtime" + "syscall" "unsafe" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) -// Same as DM_DEVICE_* enum values from libdevmapper.h -// nolint: deadcode +// DevmapperLogger defines methods for logging with devicemapper. +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + const ( deviceCreate TaskType = iota deviceReload @@ -152,7 +155,6 @@ func (t *Task) run() error { if res := DmTaskRun(t.unmanaged); res != 1 { return ErrTaskRun } - runtime.KeepAlive(t) return nil } @@ -255,12 +257,25 @@ func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start // UdevWait waits for any processes that are waiting for udev to complete the specified cookie. func UdevWait(cookie *uint) error { if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) return ErrUdevWait } return nil } +// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +// LogInit initializes the logger for the device mapper library. +func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + // SetDevDir sets the dev folder for the device mapper library (usually /dev). func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { @@ -313,21 +328,17 @@ func RemoveDevice(name string) error { return err } - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } - defer UdevWait(cookie) + defer UdevWait(&cookie) dmSawBusy = false // reset before the task is run - dmSawEnxio = false if err = task.run(); err != nil { if dmSawBusy { return ErrBusy } - if dmSawEnxio { - return ErrEnxio - } return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) } @@ -347,32 +358,7 @@ func RemoveDeviceDeferred(name string) error { return ErrTaskDeferredRemove } - // set a task cookie and disable library fallback, or else libdevmapper will - // disable udev dm rules and delete the symlink under /dev/mapper by itself, - // even if the removal is deferred by the kernel. 
- cookie := new(uint) - var flags uint16 - flags = DmUdevDisableLibraryFallback - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - - // libdevmapper and udev relies on System V semaphore for synchronization, - // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. - // So these two function call must come in pairs, otherwise semaphores will - // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all following calls to 'task.SetCookie' - // fail. - // this call will not wait for the deferred removal's final executing, since no - // udev event will be generated, and the semaphore's value will not be incremented - // by udev, what UdevWait is just cleaning up the semaphore. - defer UdevWait(cookie) - - dmSawEnxio = false if err = task.run(); err != nil { - if dmSawEnxio { - return ErrEnxio - } return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) } @@ -441,7 +427,7 @@ func BlockDeviceDiscard(path string) error { // Without this sometimes the remove of the device that happens after // discard fails with EBUSY. - unix.Sync() + syscall.Sync() return nil } @@ -464,13 +450,13 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return fmt.Errorf("devicemapper: Can't add target %s", err) } - cookie := new(uint) + var cookie uint var flags uint16 flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag - if err := task.setCookie(cookie, flags); err != nil { + if err := task.setCookie(&cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(cookie) + defer UdevWait(&cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) @@ -498,7 +484,7 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) + return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) } return nil @@ -652,11 +638,11 @@ func ResumeDevice(name string) error { return err } - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(cookie) + defer UdevWait(&cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceResume %s", err) @@ -750,12 +736,12 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext return fmt.Errorf("devicemapper: Can't add node %s", err) } - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(cookie) + defer UdevWait(&cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) @@ -764,51 +750,51 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext return nil } -// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. 
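The cookie/UdevWait pairing in these hunks is internal plumbing, but the deferred-removal path it serves is exported. A sketch of how a caller might choose between deferred and synchronous removal; the device name is illustrative, and the package needs cgo plus libdevmapper to build:

package main

import (
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

func main() {
	const name = "docker-thinpool-snap" // illustrative device-mapper name
	if devicemapper.LibraryDeferredRemovalSupport {
		// The kernel removes the device once its last user closes it.
		if err := devicemapper.RemoveDeviceDeferred(name); err != nil {
			log.Fatal(err)
		}
		return
	}
	// Fall back to synchronous removal; callers typically retry on ErrBusy.
	if err := devicemapper.RemoveDevice(name); err != nil {
		log.Fatal(err)
	}
}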
-func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { +// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + task, err := TaskCreateNamed(deviceTargetMsg, poolName) if task == nil { + if doSuspend { + ResumeDevice(baseName) + } return err } if err := task.setSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) + } return fmt.Errorf("devicemapper: Can't set sector %s", err) } if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + if doSuspend { + ResumeDevice(baseName) + } return fmt.Errorf("devicemapper: Can't set message %s", err) } dmSawExist = false // reset before the task is run if err := task.run(); err != nil { + if doSuspend { + ResumeDevice(baseName) + } // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. if dmSawExist { return ErrDeviceIDExists } - return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) - } - return nil -} + return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) -// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, -func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { - devinfo, _ := GetInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := SuspendDevice(baseName); err != nil { - return err - } - } - - if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { - if doSuspend { - if err2 := ResumeDevice(baseName); err2 != nil { - return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) - } - } - return err } if doSuspend { diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go index b540281fab66..76c9756601e6 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go @@ -1,49 +1,21 @@ -// +build linux,cgo +// +build linux package devicemapper import "C" import ( - "fmt" "strings" - - "github.com/sirupsen/logrus" ) -// DevmapperLogger defines methods required to register as a callback for -// logging events recieved from devicemapper. Note that devicemapper will send -// *all* logs regardless to callbacks (including debug logs) so it's -// recommended to not spam the console with the outputs. -type DevmapperLogger interface { - // DMLog is the logging callback containing all of the information from - // devicemapper. The interface is identical to the C libdm counterpart. - DMLog(level int, file string, line int, dmError int, message string) -} - -// dmLogger is the current logger in use that is being forwarded our messages. -var dmLogger DevmapperLogger - -// LogInit changes the logging callback called after processing libdm logs for -// error message information. The default logger simply forwards all logs to -// logrus. Calling LogInit(nil) disables the calling of callbacks. 
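With the logrus-backed default logger removed here, no libdm messages are forwarded until a caller registers its own DevmapperLogger through LogInit; the interface shape is the one restored in devmapper.go above. A minimal implementation sketch, assuming logrus as the sink:

package main

import (
	"github.com/containers/storage/pkg/devicemapper"
	"github.com/sirupsen/logrus"
)

// dmLogrus satisfies devicemapper.DevmapperLogger and forwards every
// libdevmapper message to logrus.
type dmLogrus struct{}

func (dmLogrus) DMLog(level int, file string, line int, dmError int, message string) {
	logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}

func main() {
	// LogInit stores the logger and registers the cgo callback, so the
	// "busy" / "File exists" message parsing in StorageDevmapperLogCallback
	// keeps working and the log lines reach our logger.
	devicemapper.LogInit(dmLogrus{})
}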
-func LogInit(logger DevmapperLogger) { - dmLogger = logger -} - // Due to the way cgo works this has to be in a separate file, as devmapper.go has // definitions in the cgo block, which is incompatible with using "//export" -// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that -// because we are using callbacks, this function will be called for *every* log -// in libdm (even debug ones because there's no way of setting the verbosity -// level for an external logging callback). +// StorageDevmapperLogCallback exports the devmapper log callback for cgo. //export StorageDevmapperLogCallback -func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { +func StorageDevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { msg := C.GoString(message) - - // Track what errno libdm saw, because the library only gives us 0 or 1. - if level < LogLevelDebug { + if level < 7 { if strings.Contains(msg, "busy") { dmSawBusy = true } @@ -61,61 +33,3 @@ func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) } } - -// DefaultLogger is the default logger used by pkg/devicemapper. It forwards -// all logs that are of higher or equal priority to the given level to the -// corresponding logrus level. -type DefaultLogger struct { - // Level corresponds to the highest libdm level that will be forwarded to - // logrus. In order to change this, register a new DefaultLogger. - Level int -} - -// DMLog is the logging callback containing all of the information from -// devicemapper. The interface is identical to the C libdm counterpart. -func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { - if level <= l.Level { - // Forward the log to the correct logrus level, if allowed by dmLogLevel. - logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - switch level { - case LogLevelFatal, LogLevelErr: - logrus.Error(logMsg) - case LogLevelWarn: - logrus.Warn(logMsg) - case LogLevelNotice, LogLevelInfo: - logrus.Info(logMsg) - case LogLevelDebug: - logrus.Debug(logMsg) - default: - // Don't drop any "unknown" levels. - logrus.Info(logMsg) - } - } -} - -// registerLogCallback registers our own logging callback function for libdm -// (which is StorageDevmapperLogCallback). -// -// Because libdm only gives us {0,1} error codes we need to parse the logs -// produced by libdm (to set dmSawBusy and so on). Note that by registering a -// callback using StorageDevmapperLogCallback, libdm will no longer output logs to -// stderr so we have to log everything ourselves. None of this handling is -// optional because we depend on log callbacks to parse the logs, and if we -// don't forward the log information we'll be in a lot of trouble when -// debugging things. -func registerLogCallback() { - LogWithErrnoInit() -} - -func init() { - // Use the default logger by default. We only allow LogLevelFatal by - // default, because internally we mask a lot of libdm errors by retrying - // and similar tricks. Also, libdm is very chatty and we don't want to - // worry users for no reason. - dmLogger = DefaultLogger{ - Level: LogLevelFatal, - } - - // Register as early as possible so we don't miss anything. 
- registerLogCallback() -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go index 190d83d4999c..e37e02059c61 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -1,9 +1,9 @@ -// +build linux,cgo +// +build linux package devicemapper /* -#define _GNU_SOURCE +#cgo LDFLAGS: -L. -ldevmapper #include <libdevmapper.h> #include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? @@ -12,25 +12,19 @@ extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_ static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) { - char *buffer = NULL; - va_list ap; - int ret; - - va_start(ap, f); - ret = vasprintf(&buffer, f, ap); - va_end(ap); - if (ret < 0) { - // memory allocation failed -- should never happen? - return; - } + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); - StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); - free(buffer); + StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); } static void log_with_errno_init() { - dm_log_with_errno_init(log_cb); + dm_log_with_errno_init(log_cb); } */ import "C" @@ -62,6 +56,7 @@ const ( var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct DmSetDevDir = dmSetDevDirFct DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct @@ -231,6 +226,10 @@ func dmCookieSupportedFct() int { return int(C.dm_cookie_supported()) } +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + func logWithErrnoInitFct() { C.log_with_errno_init() } diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go index 7f793c270868..dc361eab7655 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -1,11 +1,14 @@ -// +build linux,cgo,!libdm_no_deferred_remove +// +build linux,!libdm_no_deferred_remove package devicemapper -// #include <libdevmapper.h> +/* +#cgo LDFLAGS: -L. -ldevmapper +#include <libdevmapper.h> +*/ import "C" -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +// LibraryDeferredRemovalSupport is supported when statically linked.
const LibraryDeferredRemovalSupport = true func dmTaskDeferredRemoveFct(task *cdmTask) int { diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go deleted file mode 100644 index 7d84508982d0..000000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build - -package devicemapper - -// #cgo pkg-config: devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go index a880fec8c499..4a6665de8605 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -1,8 +1,8 @@ -// +build linux,cgo,libdm_no_deferred_remove +// +build linux,libdm_no_deferred_remove package devicemapper -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +// LibraryDeferredRemovalSupport is not supported when statically linked. const LibraryDeferredRemovalSupport = false func dmTaskDeferredRemoveFct(task *cdmTask) int { diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go deleted file mode 100644 index cf7f26a4c671..000000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,static_build - -package devicemapper - -// #cgo pkg-config: --static devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go index 50ea7c48238f..581b57eb86ff 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -1,16 +1,15 @@ -// +build linux,cgo +// +build linux package devicemapper import ( + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) func ioctlBlkGetSize64(fd uintptr) (int64, error) { var size int64 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { return 0, err } return size, nil @@ -21,7 +20,7 @@ func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { r[0] = offset r[1] = length - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { return err } return nil diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go deleted file mode 100644 index 7df7f3d43641..000000000000 --- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build linux - -package dmesg - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Dmesg returns last messages from the kernel log, up to size bytes -func Dmesg(size int) []byte { - t := uintptr(3) // SYSLOG_ACTION_READ_ALL - b := make([]byte, size) - amt, _, err :=
unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) - if err != 0 { - return []byte{} - } - return b[:amt] -} diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux_test.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux_test.go deleted file mode 100644 index c5028aac1da9..000000000000 --- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package dmesg - -import ( - "testing" -) - -func TestDmesg(t *testing.T) { - t.Logf("dmesg output follows:\n%v", string(Dmesg(512))) -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index a129e654ea26..6f4a6e613895 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -13,74 +13,98 @@ import ( "github.com/sirupsen/logrus" ) -// PatternMatcher allows checking paths agaist a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool +// exclusion returns true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' } -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { +// empty returns true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns and returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on its own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { + pattern = strings.TrimSpace(pattern) + if empty(pattern) { continue } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") } - newp.exclusion = true - p = p[1:] - pm.exclusions = true + exceptions = true } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical.
- if _, err := filepath.Match(p, "."); err != nil { - return nil, err + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) + patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) } - return pm, nil + + return cleanedPatterns, patternDirs, exceptions, nil } -// Matches matches path against all the patterns. Matches is not safe to be -// called concurrently -func (pm *PatternMatcher) Matches(file string) (bool, error) { +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doesn't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { matched := false file = filepath.FromSlash(file) parentPath := filepath.Dir(file) parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - for _, pattern := range pm.patterns { + for i, pattern := range patterns { negative := false - if pattern.exclusion { + if exclusion(pattern) { negative = true + pattern = pattern[1:] } - match, err := pattern.match(file) + match, err := regexpMatch(pattern, file) if err != nil { - return false, err + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) } if !match && parentPath != "." { // Check to see if the pattern matches one of our parent dirs. - if len(pattern.dirs) <= len(parentPathDirs) { - match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), + strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) } } @@ -96,49 +120,28 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) { return matched, nil } -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used used to filter file paths. -type Pattern struct { - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. 
We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm. +// +// As per the comment in golang's filepath.Match, on Windows, escaping +// is disabled. Instead, '\\' is treated as path separator. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" - if p.regexp == nil { - if err := p.compile(); err != nil { - return false, filepath.ErrBadPattern - } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since it's really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err } - b := p.regexp.MatchString(path) - - return b, nil -} - -func (p *Pattern) compile() error { - regStr := "^" - pattern := p.cleanedPattern // Go through the pattern and convert it to a regexp. // We use a scanner so we can support utf-8 chars. var scan scanner.Scanner @@ -158,19 +161,17 @@ func (p *Pattern) compile() error { // is some flavor of "**" scan.Next() - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - if scan.Peek() == scanner.EOF { // is "**EOF" - to align with .gitignore just accept all regStr += ".*" } else { // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() } } else { // is "*" so map it to anything but "/" @@ -179,7 +180,7 @@ func (p *Pattern) compile() error { } else if ch == '?' { // "?" is any char except "/" regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { + } else if strings.Index(".$", string(ch)) != -1 { // Escape some regexp special chars that have no meaning // in golang's filepath.Match regStr += `\` + string(ch) @@ -205,30 +206,14 @@ func (p *Pattern) compile() error { regStr += "$" - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - return nil -} + res, err := regexp.MatchString(regStr, path) -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) + // Map regexp's error to filepath's so no one knows we're not using filepath if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly.
- return false, nil + err = filepath.ErrBadPattern } - return pm.Matches(file) + return res, err } // CopyFile copies from src to dst until either EOF is reached diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_test.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_test.go index a70d8911fa6e..6df1be89bbb3 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_test.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_test.go @@ -1,7 +1,6 @@ package fileutils import ( - "fmt" "io/ioutil" "os" "path" @@ -9,14 +8,11 @@ import ( "runtime" "strings" "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // CopyFile with invalid src func TestCopyFileWithInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-fileutils-test") + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) @@ -33,7 +29,7 @@ func TestCopyFileWithInvalidSrc(t *testing.T) { // CopyFile with invalid dest func TestCopyFileWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-fileutils-test") + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) @@ -55,7 +51,7 @@ func TestCopyFileWithInvalidDest(t *testing.T) { // CopyFile with same src and dest func TestCopyFileWithSameSrcAndDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-fileutils-test") + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) @@ -76,7 +72,7 @@ func TestCopyFileWithSameSrcAndDest(t *testing.T) { // CopyFile with same src and dest but path is different and not clean func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-fileutils-test") + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) @@ -102,7 +98,7 @@ func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { } func TestCopyFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-fileutils-test") + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") defer os.RemoveAll(tempFolder) if err != nil { t.Fatal(err) @@ -212,7 +208,7 @@ func TestReadSymlinkedDirectoryToFile(t *testing.T) { func TestWildcardMatches(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*"}) - if !match { + if match != true { t.Errorf("failed to get a wildcard match, got %v", match) } } @@ -220,7 +216,7 @@ func TestWildcardMatches(t *testing.T) { // A simple pattern match should return true. func TestPatternMatches(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*.go"}) - if !match { + if match != true { t.Errorf("failed to get a match, got %v", match) } } @@ -228,7 +224,7 @@ func TestPatternMatches(t *testing.T) { // An exclusion followed by an inclusion should return true. func TestExclusionPatternMatchesPatternBefore(t *testing.T) { match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) - if !match { + if match != true { t.Errorf("failed to get true match on exclusion pattern, got %v", match) } } @@ -236,7 +232,7 @@ func TestExclusionPatternMatchesPatternBefore(t *testing.T) { // A folder pattern followed by an exception should return false. 
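The tests that follow pin down the order sensitivity of exclusions: a file is excluded when the last pattern that matches it is not a "!" exception. A standalone sketch of the exported Matches call, using the same patterns the tests use:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	patterns := []string{"docs", "!docs/README.md"}

	m, err := fileutils.Matches("docs/README.md", patterns)
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // false: the "!" exception re-includes README.md

	m, _ = fileutils.Matches("docs/install.md", patterns)
	fmt.Println(m) // true: still excluded via the "docs" folder pattern
}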
func TestPatternMatchesFolderExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) - if match { + if match != false { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } @@ -244,7 +240,7 @@ func TestPatternMatchesFolderExclusions(t *testing.T) { // A folder pattern followed by an exception should return false. func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) - if match { + if match != false { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } @@ -252,7 +248,7 @@ func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { // A folder pattern followed by an exception should return false. func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) - if match { + if match != false { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } @@ -260,7 +256,7 @@ func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { // A pattern followed by an exclusion should return false. func TestExclusionPatternMatchesPatternAfter(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) - if match { + if match != false { t.Errorf("failed to get false match on exclusion pattern, got %v", match) } } @@ -268,7 +264,7 @@ func TestExclusionPatternMatchesPatternAfter(t *testing.T) { // A filename evaluating to . should return false. func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { match, _ := Matches(".", []string{"*.go"}) - if match { + if match != false { t.Errorf("failed to get false match on ., got %v", match) } } @@ -281,6 +277,14 @@ func TestSingleExclamationError(t *testing.T) { } } +// A string preceded with a ! should return true from Exclusion. 
+func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + // Matches with no patterns func TestMatchesWithNoPatterns(t *testing.T) { matches, err := Matches("/any/path/there", []string{}) @@ -303,14 +307,17 @@ func TestMatchesWithMalformedPatterns(t *testing.T) { } } -type matchesTestCase struct { - pattern string - text string - pass bool -} - +// Test lots of variants of patterns & strings func TestMatches(t *testing.T) { - tests := []matchesTestCase{ + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + tests := []struct { + pattern string + text string + pass bool + }{ {"**", "file", true}, {"**", "file/", true}, {"**/", "file", true}, // weird one @@ -318,7 +325,7 @@ func TestMatches(t *testing.T) { {"**", "/", true}, {"**/", "/", true}, {"**", "dir/file", true}, - {"**/", "dir/file", true}, + {"**/", "dir/file", false}, {"**", "dir/file/", true}, {"**/", "dir/file/", true}, {"**/**", "dir/file", true}, @@ -328,7 +335,7 @@ func TestMatches(t *testing.T) { {"dir/**", "dir/dir2/file", true}, {"dir/**", "dir/dir2/file/", true}, {"**/dir2/*", "dir/dir2/file", true}, - {"**/dir2/*", "dir/dir2/file/", true}, + {"**/dir2/*", "dir/dir2/file/", false}, {"**/dir2/**", "dir/dir2/dir3/file", true}, {"**/dir2/**", "dir/dir2/dir3/file/", true}, {"**file", "file", true}, @@ -362,6 +369,9 @@ func TestMatches(t *testing.T) { {"abc.def", "abcZdef", false}, {"abc?def", "abcZdef", true}, {"abc?def", "abcdef", false}, + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, {"a\\\\", "a\\", true}, {"**/foo/bar", "foo/bar", true}, {"**/foo/bar", "dir/foo/bar", true}, @@ -369,94 +379,78 @@ func TestMatches(t *testing.T) { {"abc/**", "abc", false}, {"abc/**", "abc/def", true}, {"abc/**", "abc/def/ghi", true}, - {"**/.foo", ".foo", true}, - {"**/.foo", "bar.foo", false}, } - if runtime.GOOS != "windows" { - tests = append(tests, []matchesTestCase{ - {"a\\*b", "a*b", true}, - {"a\\", "a", false}, - {"a\\", "a\\", false}, - }...) + for _, test := range tests { + res, _ := regexpMatch(test.pattern, test.text) + if res != test.pass { + t.Fatalf("Failed: %v - res:%v", test, res) + } } +} - for _, test := range tests { - desc := fmt.Sprintf("pattern=%q text=%q", test.pattern, test.text) - pm, err := NewPatternMatcher([]string{test.pattern}) - require.NoError(t, err, desc) - res, _ := pm.Matches(test.text) - assert.Equal(t, test.pass, res, desc) +// An empty string should return true from Empty. 
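CleanPatterns is the preprocessing step Matches now delegates to; its four results are the cleaned patterns, the per-pattern path segments consumed by OptimizedMatches, the exception flag, and an error for a bare "!". A short sketch of that contract:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	cleaned, dirs, exceptions, err := fileutils.CleanPatterns(
		[]string{" docs ", "", "!docs/README.md"})
	if err != nil {
		panic(err)
	}
	fmt.Println(cleaned)    // [docs !docs/README.md] (trimmed, empties dropped)
	fmt.Println(dirs)       // [[docs] [docs README.md]] (exception marker stripped)
	fmt.Println(exceptions) // true
}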
+func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) } } func TestCleanPatterns(t *testing.T) { - patterns := []string{"docs", "config"} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - cleaned := pm.Patterns() + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) if len(cleaned) != 2 { t.Errorf("expected 2 element slice, got %v", len(cleaned)) } } func TestCleanPatternsStripEmptyPatterns(t *testing.T) { - patterns := []string{"docs", "config", ""} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - cleaned := pm.Patterns() + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) if len(cleaned) != 2 { t.Errorf("expected 2 element slice, got %v", len(cleaned)) } } func TestCleanPatternsExceptionFlag(t *testing.T) { - patterns := []string{"docs", "!docs/README.md"} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - if !pm.Exclusions() { - t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) } } func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { - patterns := []string{"docs", " !docs/README.md"} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - if !pm.Exclusions() { - t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) } } func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { - patterns := []string{"docs", "!docs/README.md "} - pm, err := NewPatternMatcher(patterns) - if err != nil { - t.Fatalf("invalid pattern %v", patterns) - } - if !pm.Exclusions() { - t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) } } func TestCleanPatternsErrorSingleException(t *testing.T) { - patterns := []string{"!"} - _, err := NewPatternMatcher(patterns) + _, _, _, err := CleanPatterns([]string{"!"}) if err == nil { t.Errorf("expected error on single exclamation point, got %v", err) } } +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + func TestCreateIfNotExistsDir(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-fileutils-test") + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") if err != nil { t.Fatal(err) } @@ -478,7 +472,7 @@ func TestCreateIfNotExistsDir(t *testing.T) { } func TestCreateIfNotExistsFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "storage-fileutils-test") + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") if err != nil { t.Fatal(err) } @@ -512,7 +506,7 @@ var matchTests = []matchTest{ {"*c", "abc", true, nil}, {"a*", "a", true, nil}, {"a*", "abc", true, nil}, - {"a*", "ab/c", 
true, nil}, + {"a*", "ab/c", false, nil}, {"a*/b", "abc/b", true, nil}, {"a*/b", "a/c/b", false, nil}, {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, @@ -576,14 +570,14 @@ func TestMatch(t *testing.T) { pattern := tt.pattern s := tt.s if runtime.GOOS == "windows" { - if strings.Contains(pattern, "\\") { + if strings.Index(pattern, "\\") >= 0 { // no escape allowed on windows. continue } pattern = filepath.Clean(pattern) s = filepath.Clean(s) } - ok, err := Matches(s, []string{pattern}) + ok, err := regexpMatch(pattern, s) if ok != tt.match || err != tt.err { t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) } diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go deleted file mode 100644 index e6094b55b717..000000000000 --- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build linux - -package fsutils - -import ( - "fmt" - "io/ioutil" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) - if err != nil { - return "", err - } - if len(children) != 0 { - return "", nil - } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") - if err != nil { - return "", err - } - name := dummyFile.Name() - err = dummyFile.Close() - return name, err -} - -// SupportsDType returns whether the filesystem mounted on path supports d_type -func SupportsDType(path string) (bool, error) { - // locate dummy so that we have at least one dirent - dummy, err := locateDummyIfEmpty(path) - if err != nil { - return false, err - } - if dummy != "" { - defer os.Remove(dummy) - } - - visited := 0 - supportsDType := true - fn := func(ent *unix.Dirent) bool { - visited++ - if ent.Type == unix.DT_UNKNOWN { - supportsDType = false - // stop iteration - return true - } - // continue iteration - return false - } - if err = iterateReadDir(path, fn); err != nil { - return false, err - } - if visited == 0 { - return false, fmt.Errorf("did not hit any dirent during iteration %s", path) - } - return supportsDType, nil -} - -func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { - d, err := os.Open(path) - if err != nil { - return err - } - defer d.Close() - fd := int(d.Fd()) - buf := make([]byte, 4096) - for { - nbytes, err := unix.ReadDirent(fd, buf) - if err != nil { - return err - } - if nbytes == 0 { - break - } - for off := 0; off < nbytes; { - ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) - if stop := fn(ent); stop { - return nil - } - off += int(ent.Reclen) - } - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go similarity index 77% rename from vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go rename to vendor/github.com/containers/storage/pkg/homedir/homedir.go index f2a20ea8f828..8154e83f0c9d 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -1,9 +1,8 @@ -// +build !windows - package homedir import ( "os" + "runtime" "github.com/opencontainers/runc/libcontainer/user" ) @@ -11,6 +10,9 @@ import ( // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } return "HOME" } @@ -19,7 +21,7 @@ func 
Key() string { // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) - if home == "" { + if home == "" && runtime.GOOS != "windows" { if u, err := user.CurrentUser(); err == nil { return u.Home } @@ -30,5 +32,8 @@ func Get() string { // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } return "~" } diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go deleted file mode 100644 index c001fbecbfb2..000000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build linux - -package homedir - -import ( - "os" - - "github.com/containers/storage/pkg/idtools" -) - -// GetStatic returns the home directory for the current user without calling -// os/user.Current(). This is useful for static-linked binary on glibc-based -// system, because a call to os/user.Current() in a static binary leads to -// segfault due to a glibc issue that won't be fixed in a short term. -// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -func GetStatic() (string, error) { - uid := os.Getuid() - usr, err := idtools.LookupUID(uid) - if err != nil { - return "", err - } - return usr.Home, nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go deleted file mode 100644 index 6b96b856f67b..000000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package homedir - -import ( - "errors" -) - -// GetStatic is not needed for non-linux systems. -// (Precisely, it is needed only for glibc-based linux systems.) -func GetStatic() (string, error) { - return "", errors.New("homedir.GetStatic() is not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go deleted file mode 100644 index fafdb2bbf932..000000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir - -import ( - "os" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
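Folding the per-OS homedir files back into a single homedir.go trades build-tag selection for runtime.GOOS checks and drops GetStatic entirely; call sites stay the same. A small usage sketch:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	// Key() is "HOME" ("USERPROFILE" on Windows); Get() falls back to the
	// libcontainer user database on unix when the variable is unset.
	cfg := filepath.Join(homedir.Get(), ".config")
	fmt.Printf("%s=%s -> %s (shortcut %s)\n",
		homedir.Key(), homedir.Get(), cfg, homedir.GetShortcutString())
}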
-func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 68a072db2206..6bca466286f7 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -37,56 +37,49 @@ const ( // MkdirAllAs creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. -// Deprecated: Use MkdirAllAndChown func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, true, true) } +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + // MkdirAs creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership -// Deprecated: Use MkdirAndChown with a IDPair func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { return mkdirAs(path, mode, ownerUID, ownerGID, false, true) } -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, true) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, false, true) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, false) -} - // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. // If the maps are empty, then the root uid/gid will default to "real" 0/0 func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID } return uid, gid, nil } -// toContainer takes an id mapping, and uses it to translate a +// ToContainer takes an id mapping, and uses it to translate a // host ID to the remapped ID. 
If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { +func ToContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -99,10 +92,10 @@ func toContainer(hostID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } -// toHost takes an id mapping and a remapped ID, and translates the +// ToHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id -func toHost(contID int, idMap []IDMap) (int, error) { +func ToHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -115,101 +108,26 @@ func toHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// IDPair is a UID and GID pair -type IDPair struct { - UID int - GID int -} - -// IDMappings contains a mappings of UIDs and GIDs -type IDMappings struct { - uids []IDMap - gids []IDMap -} - -// NewIDMappings takes a requested user and group name and +// CreateIDMappings takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func NewIDMappings(username, groupname string) (*IDMappings, error) { +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { subuidRanges, err := parseSubuid(username) if err != nil { - return nil, err + return nil, nil, err } subgidRanges, err := parseSubgid(groupname) if err != nil { - return nil, err + return nil, nil, err } if len(subuidRanges) == 0 { - return nil, fmt.Errorf("No subuid ranges found for user %q", username) + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) } if len(subgidRanges) == 0 { - return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return &IDMappings{ - uids: createIDMap(subuidRanges), - gids: createIDMap(subgidRanges), - }, nil -} - -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { - return &IDMappings{uids: uids, gids: gids} -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i *IDMappings) RootPair() IDPair { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return IDPair{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid.
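A sketch of how a caller would use the downgraded idtools API above, where ToHost/ToContainer are exported free functions over []IDMap and the IDPair/IDMappings wrappers are gone (assumes the IDMap fields ContainerID/HostID/Size defined elsewhere in this package; the mapping values are hypothetical):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Map container IDs 0-65535 onto host IDs starting at 100000.
	idMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	hostID, err := idtools.ToHost(0, idMap) // container root -> host ID 100000
	if err != nil {
		panic(err)
	}
	contID, err := idtools.ToContainer(hostID, idMap) // back to container ID 0
	if err != nil {
		panic(err)
	}
	fmt.Println(hostID, contID)
}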
-// Remapping is only performed if the ids aren't already the remapped root ids -func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) - if err != nil { - return target, err - } - } - - if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) - } - return target, err -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) - if err != nil { - return -1, -1, err + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - gid, err := toContainer(pair.GID, i.gids) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i *IDMappings) Empty() bool { - return len(i.uids) == 0 && len(i.gids) == 0 -} - -// UIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) UIDs() []IDMap { - return i.uids -} -// GIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) GIDs() []IDMap { - return i.gids + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil } func createIDMap(subidRanges ranges) []IDMap { diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index b5870506a080..b2cfb05e4f11 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -3,21 +3,10 @@ package idtools import ( - "bytes" - "fmt" - "io" "os" "path/filepath" - "strings" - "sync" "github.com/containers/storage/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string ) func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { @@ -29,8 +18,11 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { paths = []string{path} } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } // short-circuit--we were called with an existing directory and chown was requested - return os.Chown(path, ownerUID, ownerGID) + return nil } else if err == nil { // nothing to do; directory path fully exists already and chown was NOT requested return nil @@ -49,7 +41,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown paths = append(paths, dirPath) } } - if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { return err } } else { @@ -66,139 +58,3 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown } return nil } - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair IDPair) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(pair.UID), - statInfo.GID() == uint32(pair.GID), permBits) -} - -func accessible(isOwner, isGroup bool, perms 
os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { - return true - } - if isGroup && (perms&0010 == 0010) { - return true - } - if perms&0001 == 0001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) -} - -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) - } - return users[0], nil -} - -// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(groupname string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(groupname) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) -} - -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) - if err != nil { - return user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) - } - return groups[0], nil -} - -func callGetent(args string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("") - } - out, err := execCmd(getentCmd, args) - if err != nil { - exitCode, errC := 
system.GetExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - - } - return bytes.NewReader(out), nil -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix_test.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix_test.go index 2463342a65e1..540d3079ee23 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix_test.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix_test.go @@ -7,10 +7,8 @@ import ( "io/ioutil" "os" "path/filepath" + "syscall" "testing" - - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" ) type node struct { @@ -78,9 +76,12 @@ func TestMkdirAllAs(t *testing.T) { } } -func TestMkdirAllAndChownNew(t *testing.T) { +func TestMkdirAllNewAs(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirnew") - require.NoError(t, err) + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } defer os.RemoveAll(dirName) testTree := map[string]node{ @@ -90,32 +91,49 @@ func TestMkdirAllAndChownNew(t *testing.T) { "lib/x86_64": {45, 45}, "lib/x86_64/share": {1, 1}, } - require.NoError(t, buildTree(dirName, testTree)) - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - err = MkdirAllAndChownNew(filepath.Join(dirName, "usr", "share"), 0755, IDPair{99, 99}) - require.NoError(t, err) + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } testTree["usr/share"] = node{99, 99} verifyTree, err := readTree(dirName, "") - require.NoError(t, err) - require.NoError(t, compareTrees(testTree, verifyTree)) + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } // test 2-deep new directories--both should be owned by the uid/gid pair - err = MkdirAllAndChownNew(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{101, 101}) - require.NoError(t, err) + if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } testTree["lib/some"] = node{101, 101} testTree["lib/some/other"] = node{101, 101} verifyTree, err = readTree(dirName, "") - require.NoError(t, err) - require.NoError(t, compareTrees(testTree, verifyTree)) + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } // test a directory that already exists; should NOT be chowned - err = MkdirAllAndChownNew(filepath.Join(dirName, "usr"), 0755, IDPair{102, 102}) - require.NoError(t, err) + if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } verifyTree, err = readTree(dirName, "") - require.NoError(t, err) - require.NoError(t, compareTrees(testTree, verifyTree)) + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } } func TestMkdirAs(t *testing.T) { @@ -187,8 +205,8 @@ func readTree(base, root string) (map[string]node, error) { } 
for _, info := range dirInfos { - s := &unix.Stat_t{} - if err := unix.Stat(filepath.Join(base, info.Name()), s); err != nil { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) } tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go index dbf6bc4c94c2..0cad1736692b 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -11,15 +11,8 @@ import ( // Platforms such as Windows do not support the UID/GID concept. So make this // just a wrapper around system.MkdirAll. func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { return err } return nil } - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, pair IDPair) bool { - return true -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go index 9da7975e2c17..4a4aaed04d0f 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -2,6 +2,8 @@ package idtools import ( "fmt" + "os/exec" + "path/filepath" "regexp" "sort" "strconv" @@ -31,6 +33,23 @@ var ( userMod = "usermod" ) +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + // AddNamespaceRangesUser takes a username and uses the standard system // utility to create a system user/group pair used to hold the // /etc/sub{uid,gid} ranges which will be used for user namespace @@ -162,3 +181,8 @@ func wouldOverlap(arange subIDRange, ID int) bool { } return false } + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go deleted file mode 100644 index 9703ecbd9d6a..000000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !windows - -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) - return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/containers/storage/pkg/integration/checker/checker.go b/vendor/github.com/containers/storage/pkg/integration/checker/checker.go new file mode 100644 index 000000000000..d1b703a599f5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/integration/checker/checker.go @@ -0,0 +1,46 @@ +// Package checker provides Docker specific implementations of the go-check.Checker interface. +package checker + +import ( + "github.com/go-check/check" + "github.com/vdemeester/shakers" +) + +// As a commodity, we bring all check.Checker variables into the current namespace to avoid having +// to think about check.X versus checker.X. +var ( + DeepEquals = check.DeepEquals + ErrorMatches = check.ErrorMatches + FitsTypeOf = check.FitsTypeOf + HasLen = check.HasLen + Implements = check.Implements + IsNil = check.IsNil + Matches = check.Matches + Not = check.Not + NotNil = check.NotNil + PanicMatches = check.PanicMatches + Panics = check.Panics + + Contains = shakers.Contains + ContainsAny = shakers.ContainsAny + Count = shakers.Count + Equals = shakers.Equals + EqualFold = shakers.EqualFold + False = shakers.False + GreaterOrEqualThan = shakers.GreaterOrEqualThan + GreaterThan = shakers.GreaterThan + HasPrefix = shakers.HasPrefix + HasSuffix = shakers.HasSuffix + Index = shakers.Index + IndexAny = shakers.IndexAny + IsAfter = shakers.IsAfter + IsBefore = shakers.IsBefore + IsBetween = shakers.IsBetween + IsLower = shakers.IsLower + IsUpper = shakers.IsUpper + LessOrEqualThan = shakers.LessOrEqualThan + LessThan = shakers.LessThan + TimeEquals = shakers.TimeEquals + True = shakers.True + TimeIgnore = shakers.TimeIgnore +) diff --git a/vendor/github.com/containers/storage/pkg/integration/dockerCmd_utils.go b/vendor/github.com/containers/storage/pkg/integration/dockerCmd_utils.go new file mode 100644 index 000000000000..fab3e062ddc1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/integration/dockerCmd_utils.go @@ -0,0 +1,78 @@ +package integration + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/go-check/check" +) + +// We use the elongated quote mechanism for quoting error returns as +// the use of strconv.Quote or %q in fmt.Errorf will escape characters. This +// has a big downside on Windows where the args include paths, so instead +// of something like c:\directory\file.txt, the output would be +// c:\\directory\\file.txt. 
This is highly misleading. +const quote = `"` + +var execCommand = exec.Command + +// DockerCmdWithError executes a docker command that is supposed to fail and returns +// the output, the exit code and the error. +func DockerCmdWithError(dockerBinary string, args ...string) (string, int, error) { + return RunCommandWithOutput(execCommand(dockerBinary, args...)) +} + +// DockerCmdWithStdoutStderr executes a docker command and returns the content of the +// stdout, stderr and the exit code. If a check.C is passed, it will fail and stop tests +// if the error is not nil. +func DockerCmdWithStdoutStderr(dockerBinary string, c *check.C, args ...string) (string, string, int) { + stdout, stderr, status, err := RunCommandWithStdoutStderr(execCommand(dockerBinary, args...)) + if c != nil { + c.Assert(err, check.IsNil, check.Commentf(quote+"%v"+quote+" failed with errors: %s, %v", strings.Join(args, " "), stderr, err)) + } + return stdout, stderr, status +} + +// DockerCmd executes a docker command and returns the output and the exit code. If the +// command returns an error, it will fail and stop the tests. +func DockerCmd(dockerBinary string, c *check.C, args ...string) (string, int) { + out, status, err := RunCommandWithOutput(execCommand(dockerBinary, args...)) + c.Assert(err, check.IsNil, check.Commentf(quote+"%v"+quote+" failed with errors: %s, %v", strings.Join(args, " "), out, err)) + return out, status +} + +// DockerCmdWithTimeout executes a docker command with a timeout, and returns the output, +// the exit code and the error (if any). +func DockerCmdWithTimeout(dockerBinary string, timeout time.Duration, args ...string) (string, int, error) { + out, status, err := RunCommandWithOutputAndTimeout(execCommand(dockerBinary, args...), timeout) + if err != nil { + return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out) + } + return out, status, err +} + +// DockerCmdInDir executes a docker command in a directory and returns the output, the +// exit code and the error (if any). +func DockerCmdInDir(dockerBinary string, path string, args ...string) (string, int, error) { + dockerCommand := execCommand(dockerBinary, args...) + dockerCommand.Dir = path + out, status, err := RunCommandWithOutput(dockerCommand) + if err != nil { + return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out) + } + return out, status, err +} + +// DockerCmdInDirWithTimeout executes a docker command in a directory with a timeout and +// returns the output, the exit code and the error (if any). +func DockerCmdInDirWithTimeout(dockerBinary string, timeout time.Duration, path string, args ...string) (string, int, error) { + dockerCommand := execCommand(dockerBinary, args...) 
+ dockerCommand.Dir = path + out, status, err := RunCommandWithOutputAndTimeout(dockerCommand, timeout) + if err != nil { + return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out) + } + return out, status, err +} diff --git a/vendor/github.com/containers/storage/pkg/integration/dockerCmd_utils_test.go b/vendor/github.com/containers/storage/pkg/integration/dockerCmd_utils_test.go new file mode 100644 index 000000000000..3dd5d11461e8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/integration/dockerCmd_utils_test.go @@ -0,0 +1,405 @@ +package integration + +import ( + "fmt" + "os" + "os/exec" + "testing" + + "io/ioutil" + "strings" + "time" + + "github.com/go-check/check" +) + +const dockerBinary = "docker" + +// Setup go-check for this test +func Test(t *testing.T) { + check.TestingT(t) +} + +func init() { + check.Suite(&DockerCmdSuite{}) +} + +type DockerCmdSuite struct{} + +// Fake the exec.Command to use our mock. +func (s *DockerCmdSuite) SetUpTest(c *check.C) { + execCommand = fakeExecCommand +} + +// And bring it back to normal after the test. +func (s *DockerCmdSuite) TearDownTest(c *check.C) { + execCommand = exec.Command +} + +// DockerCmdWithError tests + +func (s *DockerCmdSuite) TestDockerCmdWithError(c *check.C) { + cmds := []struct { + binary string + args []string + expectedOut string + expectedExitCode int + expectedError error + }{ + { + "doesnotexists", + []string{}, + "Command doesnotexists not found.", + 1, + fmt.Errorf("exit status 1"), + }, + { + dockerBinary, + []string{"an", "error"}, + "an error has occurred", + 1, + fmt.Errorf("exit status 1"), + }, + { + dockerBinary, + []string{"an", "exitCode", "127"}, + "an error has occurred with exitCode 127", + 127, + fmt.Errorf("exit status 127"), + }, + { + dockerBinary, + []string{"run", "-ti", "ubuntu", "echo", "hello"}, + "hello", + 0, + nil, + }, + } + for _, cmd := range cmds { + out, exitCode, error := DockerCmdWithError(cmd.binary, cmd.args...) + c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) + c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) + if cmd.expectedError != nil { + c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) + c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) + } else { + c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) + } + } +} + +// DockerCmdWithStdoutStderr tests + +type dockerCmdWithStdoutStderrErrorSuite struct{} + +func (s *dockerCmdWithStdoutStderrErrorSuite) Test(c *check.C) { + // Should fail, the test too + DockerCmdWithStdoutStderr(dockerBinary, c, "an", "error") +} + +type dockerCmdWithStdoutStderrSuccessSuite struct{} + +func (s *dockerCmdWithStdoutStderrSuccessSuite) Test(c *check.C) { + stdout, stderr, exitCode := DockerCmdWithStdoutStderr(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello") + c.Assert(stdout, check.Equals, "hello") + c.Assert(stderr, check.Equals, "") + c.Assert(exitCode, check.Equals, 0) + +} + +func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrError(c *check.C) { + // Run error suite, should fail. 
+ output := String{} + result := check.Run(&dockerCmdWithStdoutStderrErrorSuite{}, &check.RunConf{Output: &output}) + c.Check(result.Succeeded, check.Equals, 0) + c.Check(result.Failed, check.Equals, 1) +} + +func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrSuccess(c *check.C) { + // Run the success suite; should pass. + output := String{} + result := check.Run(&dockerCmdWithStdoutStderrSuccessSuite{}, &check.RunConf{Output: &output}) + c.Check(result.Succeeded, check.Equals, 1) + c.Check(result.Failed, check.Equals, 0) +} + +// DockerCmd tests + +type dockerCmdErrorSuite struct{} + +func (s *dockerCmdErrorSuite) Test(c *check.C) { + // Should fail, the test too + DockerCmd(dockerBinary, c, "an", "error") +} + +type dockerCmdSuccessSuite struct{} + +func (s *dockerCmdSuccessSuite) Test(c *check.C) { + stdout, exitCode := DockerCmd(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello") + c.Assert(stdout, check.Equals, "hello") + c.Assert(exitCode, check.Equals, 0) + +} + +func (s *DockerCmdSuite) TestDockerCmdError(c *check.C) { + // Run error suite, should fail. + output := String{} + result := check.Run(&dockerCmdErrorSuite{}, &check.RunConf{Output: &output}) + c.Check(result.Succeeded, check.Equals, 0) + c.Check(result.Failed, check.Equals, 1) +} + +func (s *DockerCmdSuite) TestDockerCmdSuccess(c *check.C) { + // Run the success suite; should pass. + output := String{} + result := check.Run(&dockerCmdSuccessSuite{}, &check.RunConf{Output: &output}) + c.Check(result.Succeeded, check.Equals, 1) + c.Check(result.Failed, check.Equals, 0) +} + +// DockerCmdWithTimeout tests + +func (s *DockerCmdSuite) TestDockerCmdWithTimeout(c *check.C) { + cmds := []struct { + binary string + args []string + timeout time.Duration + expectedOut string + expectedExitCode int + expectedError error + }{ + { + "doesnotexists", + []string{}, + 200 * time.Millisecond, + `Command doesnotexists not found.`, + 1, + fmt.Errorf(`"" failed with errors: exit status 1 : "Command doesnotexists not found."`), + }, + { + dockerBinary, + []string{"an", "error"}, + 200 * time.Millisecond, + `an error has occurred`, + 1, + fmt.Errorf(`"an error" failed with errors: exit status 1 : "an error has occurred"`), + }, + { + dockerBinary, + []string{"a", "command", "that", "times", "out"}, + 5 * time.Millisecond, + "", + 0, + fmt.Errorf(`"a command that times out" failed with errors: command timed out : ""`), + }, + { + dockerBinary, + []string{"run", "-ti", "ubuntu", "echo", "hello"}, + 200 * time.Millisecond, + "hello", + 0, + nil, + }, + } + for _, cmd := range cmds { + out, exitCode, error := DockerCmdWithTimeout(cmd.binary, cmd.timeout, cmd.args...)
+ c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) + c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) + if cmd.expectedError != nil { + c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) + c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) + } else { + c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) + } + } +} + +// DockerCmdInDir tests + +func (s *DockerCmdSuite) TestDockerCmdInDir(c *check.C) { + tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir") + c.Assert(err, check.IsNil) + + cmds := []struct { + binary string + args []string + expectedOut string + expectedExitCode int + expectedError error + }{ + { + "doesnotexists", + []string{}, + `Command doesnotexists not found.`, + 1, + fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder), + }, + { + dockerBinary, + []string{"an", "error"}, + `an error has occurred`, + 1, + fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder), + }, + { + dockerBinary, + []string{"run", "-ti", "ubuntu", "echo", "hello"}, + "hello", + 0, + nil, + }, + } + for _, cmd := range cmds { + // We prepend the arguments with dir:thefolder.. the fake command will check + // that the current workdir is the same as the one we are passing. + args := append([]string{"dir:" + tempFolder}, cmd.args...) + out, exitCode, error := DockerCmdInDir(cmd.binary, tempFolder, args...) 
+ c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) + c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) + if cmd.expectedError != nil { + c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) + c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) + } else { + c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) + } + } +} + +// DockerCmdInDirWithTimeout tests + +func (s *DockerCmdSuite) TestDockerCmdInDirWithTimeout(c *check.C) { + tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir") + c.Assert(err, check.IsNil) + + cmds := []struct { + binary string + args []string + timeout time.Duration + expectedOut string + expectedExitCode int + expectedError error + }{ + { + "doesnotexists", + []string{}, + 200 * time.Millisecond, + `Command doesnotexists not found.`, + 1, + fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder), + }, + { + dockerBinary, + []string{"an", "error"}, + 200 * time.Millisecond, + `an error has occurred`, + 1, + fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder), + }, + { + dockerBinary, + []string{"a", "command", "that", "times", "out"}, + 5 * time.Millisecond, + "", + 0, + fmt.Errorf(`"dir:%s a command that times out" failed with errors: command timed out : ""`, tempFolder), + }, + { + dockerBinary, + []string{"run", "-ti", "ubuntu", "echo", "hello"}, + 200 * time.Millisecond, + "hello", + 0, + nil, + }, + } + for _, cmd := range cmds { + // We prepend the arguments with dir:thefolder.. the fake command will check + // that the current workdir is the same as the one we are passing. + args := append([]string{"dir:" + tempFolder}, cmd.args...) + out, exitCode, error := DockerCmdInDirWithTimeout(cmd.binary, cmd.timeout, tempFolder, args...) + c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) + c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) + if cmd.expectedError != nil { + c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) + c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) + } else { + c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) + } + } +} + +// Helpers :) + +// Type implementing the io.Writer interface for analyzing output. +type String struct { + value string +} + +// The only function required by the io.Writer interface. Will append +// written data to the String.value string. +func (s *String) Write(p []byte) (n int, err error) { + s.value += string(p) + return len(p), nil +} + +// Helper function that mocks the exec.Command call (and calls the test binary) +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...)
+ cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +func TestHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + args := os.Args + + // The preceding arguments are test-runner plumbing; they look like: + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + cmd, args := args[3], args[4:] + // Handle the case where args[0] is dir:... + if len(args) > 0 && strings.HasPrefix(args[0], "dir:") { + expectedCwd := args[0][4:] + if len(args) > 1 { + args = args[1:] + } + cwd, err := os.Getwd() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get workingdir: %v", err) + os.Exit(1) + } + // This checks that the given path is the same as the current working directory + if expectedCwd != cwd { + fmt.Fprintf(os.Stderr, "Current workdir should be %q, but is %q", expectedCwd, cwd) + } + } + switch cmd { + case dockerBinary: + argsStr := strings.Join(args, " ") + switch argsStr { + case "an exitCode 127": + fmt.Fprintf(os.Stderr, "an error has occurred with exitCode 127") + os.Exit(127) + case "an error": + fmt.Fprintf(os.Stderr, "an error has occurred") + os.Exit(1) + case "a command that times out": + time.Sleep(10 * time.Second) + fmt.Fprintf(os.Stdout, "too long, should be killed") + // A random exit code (that should never happen in tests) + os.Exit(7) + case "run -ti ubuntu echo hello": + fmt.Fprintf(os.Stdout, "hello") + default: + fmt.Fprintf(os.Stdout, "no arguments") + } + default: + fmt.Fprintf(os.Stderr, "Command %s not found.", cmd) + os.Exit(1) + } + // some code here to check arguments perhaps? + os.Exit(0) +} diff --git a/vendor/github.com/containers/storage/pkg/integration/utils.go b/vendor/github.com/containers/storage/pkg/integration/utils.go new file mode 100644 index 000000000000..806d611d1ea1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/integration/utils.go @@ -0,0 +1,361 @@ +package integration + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "syscall" + "time" + + "github.com/containers/storage/pkg/stringutils" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode processes the specified error and returns the exit status code +// if the error was of type exec.ExitError; it returns 0 otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} + +// IsKilled inspects the specified error and reports whether the process was killed or not. +func IsKilled(err error) bool { + if exitErr, ok := err.(*exec.ExitError); ok { + status, ok := exitErr.Sys().(syscall.WaitStatus) + if !ok { + return false + } + // status.ExitStatus() is required on Windows because it does not + // implement Signal() nor Signaled().
A bad exit + // status can simply mean the process was killed (and in tests we do kill) + return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 + } + return false +} + +// RunCommandWithOutput runs the specified command and returns the combined output (stdout/stderr) +// together with a non-zero exitCode and an error if something went wrong +func RunCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { + exitCode = 0 + out, err := cmd.CombinedOutput() + exitCode = ProcessExitCode(err) + output = string(out) + return +} + +// RunCommandWithStdoutStderr runs the specified command and returns stdout and stderr separately +// together with a non-zero exitCode and an error if something went wrong +func RunCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { + var ( + stderrBuffer, stdoutBuffer bytes.Buffer + ) + exitCode = 0 + cmd.Stderr = &stderrBuffer + cmd.Stdout = &stdoutBuffer + err = cmd.Run() + exitCode = ProcessExitCode(err) + + stdout = stdoutBuffer.String() + stderr = stderrBuffer.String() + return +} + +// RunCommandWithOutputForDuration runs the specified command "timeboxed" by the specified duration. +// If the process is still running when the timebox is finished, the process will be killed. +// It returns the output together with a non-zero exitCode and an error if something went wrong, +// and a boolean indicating whether it has been killed or not. +func RunCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) { + var outputBuffer bytes.Buffer + if cmd.Stdout != nil { + err = errors.New("cmd.Stdout already set") + return + } + cmd.Stdout = &outputBuffer + + if cmd.Stderr != nil { + err = errors.New("cmd.Stderr already set") + return + } + cmd.Stderr = &outputBuffer + + // Start the command in the main thread. + err = cmd.Start() + if err != nil { + err = fmt.Errorf("Fail to start command %v : %v", cmd, err) + } + + type exitInfo struct { + exitErr error + exitCode int + } + + done := make(chan exitInfo, 1) + + go func() { + // And wait for it to exit in the goroutine :) + info := exitInfo{} + info.exitErr = cmd.Wait() + info.exitCode = ProcessExitCode(info.exitErr) + done <- info + }() + + select { + case <-time.After(duration): + killErr := cmd.Process.Kill() + if killErr != nil { + fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr) + } + timedOut = true + case info := <-done: + err = info.exitErr + exitCode = info.exitCode + } + output = outputBuffer.String() + return +} + +var errCmdTimeout = fmt.Errorf("command timed out") + +// RunCommandWithOutputAndTimeout runs the specified command "timeboxed" by the specified duration. +// It returns the output together with a non-zero exitCode and an error if something went wrong or +// if the process timed out (and has been killed). +func RunCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) { + var timedOut bool + output, exitCode, timedOut, err = RunCommandWithOutputForDuration(cmd, timeout) + if timedOut { + err = errCmdTimeout + } + return +} + +// RunCommand runs the specified command and returns a non-zero exitCode +// and an error if something went wrong.
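For context, a sketch of how a test might drive the timebox helpers above (an editor's example, assuming it sits in this integration package; the command is arbitrary):

// A process that outlives its timebox is killed and the sentinel
// errCmdTimeout ("command timed out") is returned.
func exampleTimeboxedRun() {
	cmd := exec.Command("sleep", "10")
	out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 100*time.Millisecond)
	fmt.Printf("out=%q exitCode=%d err=%v\n", out, exitCode, err) // err: "command timed out"
}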
+func RunCommand(cmd *exec.Cmd) (exitCode int, err error) { + exitCode = 0 + err = cmd.Run() + exitCode = ProcessExitCode(err) + return +} + +// RunCommandPipelineWithOutput runs the given commands with the output +// of each piped into the next (as cmd1 | cmd2 | cmd3 would do). +// It returns the final output together with a non-zero exitCode and an error +// if something went wrong. +func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + if len(cmds) < 2 { + return "", 0, errors.New("pipeline does not have multiple cmds") + } + + // connect stdin of each cmd to stdout pipe of previous cmd + for i, cmd := range cmds { + if i > 0 { + prevCmd := cmds[i-1] + cmd.Stdin, err = prevCmd.StdoutPipe() + + if err != nil { + return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) + } + } + } + + // start all cmds except the last + for _, cmd := range cmds[:len(cmds)-1] { + if err = cmd.Start(); err != nil { + return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) + } + } + + var pipelineError error + defer func() { + // wait for all cmds except the last to release their resources + for _, cmd := range cmds[:len(cmds)-1] { + if err := cmd.Wait(); err != nil { + pipelineError = fmt.Errorf("command %s failed with error: %v", cmd.Path, err) + break + } + } + }() + if pipelineError != nil { + return "", 0, pipelineError + } + + // wait on last cmd + return RunCommandWithOutput(cmds[len(cmds)-1]) +} + +// UnmarshalJSON deserializes JSON into the given interface. +func UnmarshalJSON(data []byte, result interface{}) error { + if err := json.Unmarshal(data, result); err != nil { + return err + } + + return nil +} + +// ConvertSliceOfStringsToMap converts a slice of strings into a map +// with the strings as keys and empty structs as values. +func ConvertSliceOfStringsToMap(input []string) map[string]struct{} { + output := make(map[string]struct{}) + for _, v := range input { + output[v] = struct{}{} + } + return output +} + +// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) +// and returns an error if different. +func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + var ( + e1Entries = make(map[string]struct{}) + e2Entries = make(map[string]struct{}) + ) + for _, e := range e1 { + e1Entries[e.Name()] = struct{}{} + } + for _, e := range e2 { + e2Entries[e.Name()] = struct{}{} + } + if !reflect.DeepEqual(e1Entries, e2Entries) { + return fmt.Errorf("entries differ") + } + return nil +} + +// ListTar lists the entries of a tar. +func ListTar(f io.Reader) ([]string, error) { + tr := tar.NewReader(f) + var entries []string + + for { + th, err := tr.Next() + if err == io.EOF { + // end of tar archive + return entries, nil + } + if err != nil { + return entries, err + } + entries = append(entries, th.Name) + } +} + +// RandomTmpDirPath provides a temporary path with a random string appended. +// It does not create the path or check whether it exists. +func RandomTmpDirPath(s string, platform string) string { + tmp := "/tmp" + if platform == "windows" { + tmp = os.Getenv("TEMP") + } + path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10))) + if platform == "windows" { + return filepath.FromSlash(path) // Using \ + } + return filepath.ToSlash(path) // Using / +} + +// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping +// for interval duration. Returns total read bytes.
Send true to the +// stop channel to return before reading to EOF on the reader. +func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + select { + case <-stop: + return + case <-time.After(interval): + } + } +} + +// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns +// a map with the cgroup names as keys and paths as values. +func ParseCgroupPaths(procCgroupData string) map[string]string { + cgroupPaths := map[string]string{} + for _, line := range strings.Split(procCgroupData, "\n") { + parts := strings.Split(line, ":") + if len(parts) != 3 { + continue + } + cgroupPaths[parts[1]] = parts[2] + } + return cgroupPaths +} + +// ChannelBuffer holds a channel of byte slices that can be populated from a goroutine. +type ChannelBuffer struct { + C chan []byte +} + +// Write implements Writer. +func (c *ChannelBuffer) Write(b []byte) (int, error) { + c.C <- b + return len(b), nil +} + +// Close closes the go channel. +func (c *ChannelBuffer) Close() error { + close(c.C) + return nil +} + +// ReadTimeout reads the content of the channel into the specified byte slice with +// the specified duration as timeout. +func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { + select { + case b := <-c.C: + return copy(p[0:], b), nil + case <-time.After(n): + return -1, fmt.Errorf("timeout reading from channel") + } +} + +// RunAtDifferentDate runs the specified function with the given time. +// It changes the date of the system, which can lead to weird behaviors. +func RunAtDifferentDate(date time.Time, block func()) { + // Layout for date.
MMDDhhmmYYYY + const timeLayout = "010203042006" + // Ensure we bring time back to now + now := time.Now().Format(timeLayout) + dateReset := exec.Command("date", now) + defer RunCommand(dateReset) + + dateChange := exec.Command("date", date.Format(timeLayout)) + RunCommand(dateChange) + block() + return +} diff --git a/vendor/github.com/containers/storage/pkg/integration/utils_test.go b/vendor/github.com/containers/storage/pkg/integration/utils_test.go new file mode 100644 index 000000000000..b354ab932d56 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/integration/utils_test.go @@ -0,0 +1,572 @@ +package integration + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { + var lsCmd *exec.Cmd + if runtime.GOOS != "windows" { + lsCmd = exec.Command("ls") + } else { + lsCmd = exec.Command("cmd", "/c", "dir") + } + + err := lsCmd.Run() + if IsKilled(err) { + t.Fatalf("Expected the ls command to not be killed, was.") + } +} + +func TestIsKilledTrueWithKilledProcess(t *testing.T) { + var longCmd *exec.Cmd + if runtime.GOOS != "windows" { + longCmd = exec.Command("top") + } else { + longCmd = exec.Command("powershell", "while ($true) { sleep 1 }") + } + + // Start a command + err := longCmd.Start() + if err != nil { + t.Fatal(err) + } + // Capture the error when *dying* + done := make(chan error, 1) + go func() { + done <- longCmd.Wait() + }() + // Then kill it + longCmd.Process.Kill() + // Get the error + err = <-done + if !IsKilled(err) { + t.Fatalf("Expected the command to be killed, was not.") + } +} + +func TestRunCommandWithOutput(t *testing.T) { + var ( + echoHelloWorldCmd *exec.Cmd + expected string + ) + if runtime.GOOS != "windows" { + echoHelloWorldCmd = exec.Command("echo", "hello", "world") + expected = "hello world\n" + } else { + echoHelloWorldCmd = exec.Command("cmd", "/s", "/c", "echo", "hello", "world") + expected = "hello world\r\n" + } + + out, exitCode, err := RunCommandWithOutput(echoHelloWorldCmd) + if out != expected || exitCode != 0 || err != nil { + t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expected, out, err, exitCode) + } +} + +func TestRunCommandWithOutputError(t *testing.T) { + var ( + p string + wrongCmd *exec.Cmd + expected string + expectedExitCode int + ) + + if runtime.GOOS != "windows" { + p = "$PATH" + wrongCmd = exec.Command("ls", "-z") + expected = `ls: invalid option -- 'z' +Try 'ls --help' for more information. 
+` + expectedExitCode = 2 + } else { + p = "%PATH%" + wrongCmd = exec.Command("cmd", "/s", "/c", "dir", "/Z") + expected = "Invalid switch - " + strconv.Quote("Z") + ".\r\n" + expectedExitCode = 1 + } + cmd := exec.Command("doesnotexists") + out, exitCode, err := RunCommandWithOutput(cmd) + expectedError := `exec: "doesnotexists": executable file not found in ` + p + if out != "" || exitCode != 127 || err == nil || err.Error() != expectedError { + t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expectedError, out, err, exitCode) + } + + out, exitCode, err = RunCommandWithOutput(wrongCmd) + + if out != expected || exitCode != expectedExitCode || err == nil || !strings.Contains(err.Error(), "exit status "+strconv.Itoa(expectedExitCode)) { + t.Fatalf("Expected command to output %s, got out:xxx%sxxx, err:%v with exitCode %v", expected, out, err, exitCode) + } +} + +func TestRunCommandWithStdoutStderr(t *testing.T) { + echoHelloWorldCmd := exec.Command("echo", "hello", "world") + stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(echoHelloWorldCmd) + expected := "hello world\n" + if stdout != expected || stderr != "" || exitCode != 0 || err != nil { + t.Fatalf("Expected command to output %s, got stdout:%s, stderr:%s, err:%v with exitCode %v", expected, stdout, stderr, err, exitCode) + } +} + +func TestRunCommandWithStdoutStderrError(t *testing.T) { + p := "$PATH" + if runtime.GOOS == "windows" { + p = "%PATH%" + } + cmd := exec.Command("doesnotexists") + stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(cmd) + expectedError := `exec: "doesnotexists": executable file not found in ` + p + if stdout != "" || stderr != "" || exitCode != 127 || err == nil || err.Error() != expectedError { + t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", "", stdout, stderr, err, exitCode) + } + + wrongLsCmd := exec.Command("ls", "-z") + expected := `ls: invalid option -- 'z' +Try 'ls --help' for more information. 
+` + + stdout, stderr, exitCode, err = RunCommandWithStdoutStderr(wrongLsCmd) + if stdout != "" && stderr != expected || exitCode != 2 || err == nil || err.Error() != "exit status 2" { + t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", expectedError, stdout, stderr, err, exitCode) + } +} + +func TestRunCommandWithOutputForDurationFinished(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + cmd := exec.Command("ls") + out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 50*time.Millisecond) + if out == "" || exitCode != 0 || timedOut || err != nil { + t.Fatalf("Expected the command to run for less 50 milliseconds and thus not time out, but did not : out:[%s], exitCode:[%d], timedOut:[%v], err:[%v]", out, exitCode, timedOut, err) + } +} + +func TestRunCommandWithOutputForDurationKilled(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done") + out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 500*time.Millisecond) + ones := strings.Split(out, "\n") + if len(ones) != 6 || exitCode != 0 || !timedOut || err != nil { + t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines (five with 1, one empty) and time out, but did not : out:[%s], exitCode:%d, timedOut:%v, err:%v", out, exitCode, timedOut, err) + } +} + +func TestRunCommandWithOutputForDurationErrors(t *testing.T) { + cmd := exec.Command("ls") + cmd.Stdout = os.Stdout + if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" { + t.Fatalf("Expected an error as cmd.Stdout was already set, did not (err:%s).", err) + } + cmd = exec.Command("ls") + cmd.Stderr = os.Stderr + if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" { + t.Fatalf("Expected an error as cmd.Stderr was already set, did not (err:%s).", err) + } +} + +func TestRunCommandWithOutputAndTimeoutFinished(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + cmd := exec.Command("ls") + out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 50*time.Millisecond) + if out == "" || exitCode != 0 || err != nil { + t.Fatalf("Expected the command to run for less 50 milliseconds and thus not time out, but did not : out:[%s], exitCode:[%d], err:[%v]", out, exitCode, err) + } +} + +func TestRunCommandWithOutputAndTimeoutKilled(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done") + out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 500*time.Millisecond) + ones := strings.Split(out, "\n") + if len(ones) != 6 || exitCode != 0 || err == nil || err.Error() != "command timed out" { + t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines (five with 1, one empty) and time out with an error 'command timed out', but did not : out:[%s], exitCode:%d, err:%v", out, exitCode, err) + } +} + +func TestRunCommandWithOutputAndTimeoutErrors(t *testing.T) { + cmd := exec.Command("ls") + cmd.Stdout = os.Stdout + if _, _, err := 
RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" { + t.Fatalf("Expected an error as cmd.Stdout was already set, but did not get one (err:%s).", err) + } + cmd = exec.Command("ls") + cmd.Stderr = os.Stderr + if _, _, err := RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" { + t.Fatalf("Expected an error as cmd.Stderr was already set, but did not get one (err:%s).", err) + } +} + +func TestRunCommand(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + p := "$PATH" + if runtime.GOOS == "windows" { + p = "%PATH%" + } + lsCmd := exec.Command("ls") + exitCode, err := RunCommand(lsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", exitCode, err) + } + + var expectedError string + + exitCode, err = RunCommand(exec.Command("doesnotexists")) + expectedError = `exec: "doesnotexists": executable file not found in ` + p + if exitCode != 127 || err == nil || err.Error() != expectedError { + t.Fatalf("Expected runCommand to fail with exitCode 127 for a missing executable, got: exitCode:%d, err:%v", exitCode, err) + } + wrongLsCmd := exec.Command("ls", "-z") + expected := 2 + expectedError = `exit status 2` + exitCode, err = RunCommand(wrongLsCmd) + if exitCode != expected || err == nil || err.Error() != expectedError { + t.Fatalf("Expected runCommand to fail with exitCode 2 for an invalid option, got: exitCode:%d, err:%v", exitCode, err) + } +} + +func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) { + _, _, err := RunCommandPipelineWithOutput(exec.Command("ls")) + expectedError := "pipeline does not have multiple cmds" + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with %s, got err:%s", expectedError, err) + } +} + +func TestRunCommandPipelineWithOutputErrors(t *testing.T) { + p := "$PATH" + if runtime.GOOS == "windows" { + p = "%PATH%" + } + cmd1 := exec.Command("ls") + cmd1.Stdout = os.Stdout + cmd2 := exec.Command("anything really") + _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2) + if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" { + t.Fatalf("Expected an error, got %v", err) + } + + cmdWithError := exec.Command("doesnotexists") + cmdCat := exec.Command("cat") + _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat) + if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p { + t.Fatalf("Expected an error, got %v", err) + } +} + +func TestRunCommandPipelineWithOutput(t *testing.T) { + cmds := []*exec.Cmd{ + // Print 2 characters + exec.Command("echo", "-n", "11"), + // Count the number of chars from stdin (the previous command) + exec.Command("wc", "-m"), + } + out, exitCode, err := RunCommandPipelineWithOutput(cmds...) 
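+ // Assuming RunCommandPipelineWithOutput wires each command's stdout into the next command's stdin (the helper itself is exercised, not defined, here), `wc -m` receives the two bytes written by `echo -n 11`, so the expected pipeline output below is the character count "2" plus wc's own trailing newline.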
+ expectedOutput := "2\n" + if out != expectedOutput || exitCode != 0 || err != nil { + t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err) + } +} + +// A simple test, as UnmarshalJSON is just a passthrough for json.Unmarshal +func TestUnmarshalJSON(t *testing.T) { + emptyResult := struct{}{} + if err := UnmarshalJSON([]byte(""), &emptyResult); err == nil { + t.Fatalf("Expected an error, got nothing") + } + result := struct{ Name string }{} + if err := UnmarshalJSON([]byte(`{"name": "name"}`), &result); err != nil { + t.Fatal(err) + } + if result.Name != "name" { + t.Fatalf("Expected result.name to be 'name', was '%s'", result.Name) + } +} + +func TestConvertSliceOfStringsToMap(t *testing.T) { + input := []string{"a", "b"} + actual := ConvertSliceOfStringsToMap(input) + for _, key := range input { + if _, ok := actual[key]; !ok { + t.Fatalf("Expected output to contain key %s, did not: %v", key, actual) + } + } +} + +func TestCompareDirectoryEntries(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + file1 := filepath.Join(tmpFolder, "file1") + file2 := filepath.Join(tmpFolder, "file2") + os.Create(file1) + os.Create(file2) + + fi1, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi1bis, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi2, err := os.Stat(file2) + if err != nil { + t.Fatal(err) + } + + cases := []struct { + e1 []os.FileInfo + e2 []os.FileInfo + shouldError bool + }{ + // Empty directories + { + []os.FileInfo{}, + []os.FileInfo{}, + false, + }, + // Same FileInfos + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1}, + false, + }, + // Different FileInfos but same names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1bis}, + false, + }, + // Different FileInfos, different names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi2}, + true, + }, + } + for _, elt := range cases { + err := CompareDirectoryEntries(elt.e1, elt.e2) + if elt.shouldError && err == nil { + t.Fatalf("Should have returned an error with %v and %v, but did not", elt.e1, elt.e2) + } + if !elt.shouldError && err != nil { + t.Fatalf("Should not have returned an error, but did: %v with %v and %v", err, elt.e1, elt.e2) + } + } +} + +// FIXME make an "unhappy path" test for ListTar without "panicking" :-) +func TestListTar(t *testing.T) { + // TODO Windows: Figure out why this fails. Should be portable. 
+ if runtime.GOOS == "windows" { + t.Skip("Failing on Windows - needs further investigation") + } + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + // Let's create a Tar file + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + reader, err := os.Open(tarFile) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + entries, err := ListTar(reader) + if err != nil { + t.Fatal(err) + } + // tar stores the member without its leading '/', so match on the suffix rather than the exact name + if len(entries) != 1 || !strings.HasSuffix(entries[0], "src") { + t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries) + } +} + +func TestRandomTmpDirPath(t *testing.T) { + path := RandomTmpDirPath("something", runtime.GOOS) + + prefix := "/tmp/something" + if runtime.GOOS == "windows" { + prefix = os.Getenv("TEMP") + `\something` + } + expectedSize := len(prefix) + 11 + + if !strings.HasPrefix(path, prefix) { + t.Fatalf("Expected generated path to have '%s' as prefix, got '%s'", prefix, path) + } + if len(path) != expectedSize { + t.Fatalf("Expected generated path length to be %d, got %d", expectedSize, len(path)) + } +} + +func TestConsumeWithSpeed(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 10 { + t.Fatalf("Expected to have read 10 bytes, got %d", bytes1) + } + +} + +func TestConsumeWithSpeedWithStop(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + stopIt := make(chan bool) + + go func() { + time.Sleep(1 * time.Millisecond) + stopIt <- true + }() + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 2 { + t.Fatalf("Expected to have read 2 bytes, got %d", bytes1) + } + +} + +func TestParseCgroupPathsEmpty(t *testing.T) { + cgroupMap := ParseCgroupPaths("") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("\n") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("something:else\nagain:here") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } +} + +func TestParseCgroupPaths(t *testing.T) { + cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b") + if len(cgroupMap) != 2 { + t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap) + } + if value, ok := cgroupMap["memory"]; !ok || value != "/a" { + t.Fatalf("Expected cgroupMap to contain an entry for 'memory' with value '/a', got %v", cgroupMap) + } + if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" { + t.Fatalf("Expected cgroupMap to contain an entry for 'cpuset' with value '/b', got %v", cgroupMap) + } +} + +func TestChannelBufferTimeout(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + done := make(chan struct{}, 1) + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + done <- struct{}{} + }() + + // Read with a timeout shorter than the writer's delay + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 50*time.Millisecond) + if err == nil || err.Error() != "timeout reading from channel" { + t.Fatalf("Expected an error, got %s", err) + } + 
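+ // The writer goroutine above sleeps for 100ms before producing any data, so the 50ms ReadTimeout is expected to fire first; the receive on done below keeps that goroutine from outliving the test.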
<-done +} + +func TestChannelBuffer(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 200*time.Millisecond) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("Expected '%s', got '%s'", expected, string(b)) + } +} + +// FIXME doesn't work +// func TestRunAtDifferentDate(t *testing.T) { +// var date string + +// // Layout for date. MMDDhhmmYYYY +// const timeLayout = "20060102" +// expectedDate := "20100201" +// theDate, err := time.Parse(timeLayout, expectedDate) +// if err != nil { +// t.Fatal(err) +// } + +// RunAtDifferentDate(theDate, func() { +// cmd := exec.Command("date", "+%Y%M%d") +// out, err := cmd.Output() +// if err != nil { +// t.Fatal(err) +// } +// date = string(out) +// }) +// } diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer_test.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer_test.go index f6871243865d..41098fa6e7c8 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/buffer_test.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/buffer_test.go @@ -5,81 +5,6 @@ import ( "testing" ) -func TestFixedBufferCap(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 5)} - - n := buf.Cap() - if n != 5 { - t.Fatalf("expected buffer capacity to be 5 bytes, got %d", n) - } -} - -func TestFixedBufferLen(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 10)} - - buf.Write([]byte("hello")) - l := buf.Len() - if l != 5 { - t.Fatalf("expected buffer length to be 5 bytes, got %d", l) - } - - buf.Write([]byte("world")) - l = buf.Len() - if l != 10 { - t.Fatalf("expected buffer length to be 10 bytes, got %d", l) - } - - // read 5 bytes - b := make([]byte, 5) - buf.Read(b) - - l = buf.Len() - if l != 5 { - t.Fatalf("expected buffer length to be 5 bytes, got %d", l) - } - - n, err := buf.Write([]byte("i-wont-fit")) - if n != 0 { - t.Fatalf("expected no bytes to be written to buffer, got %d", n) - } - if err != errBufferFull { - t.Fatalf("expected errBufferFull, got %v", err) - } - - l = buf.Len() - if l != 5 { - t.Fatalf("expected buffer length to still be 5 bytes, got %d", l) - } - - buf.Reset() - l = buf.Len() - if l != 0 { - t.Fatalf("expected buffer length to still be 0 bytes, got %d", l) - } -} - -func TestFixedBufferString(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 10)} - - buf.Write([]byte("hello")) - buf.Write([]byte("world")) - - out := buf.String() - if out != "helloworld" { - t.Fatalf("expected output to be \"helloworld\", got %q", out) - } - - // read 5 bytes - b := make([]byte, 5) - buf.Read(b) - - // test that fixedBuffer.String() only returns the part that hasn't been read - out = buf.String() - if out != "world" { - t.Fatalf("expected output to be \"world\", got %q", out) - } -} - func TestFixedBufferWrite(t *testing.T) { buf := &fixedBuffer{buf: make([]byte, 0, 64)} n, err := buf.Write([]byte("hello")) @@ -96,9 +21,6 @@ func TestFixedBufferWrite(t *testing.T) { } n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) - if n != 59 { - t.Fatalf("expected 59 bytes written before buffer is full, got %d", n) - } if err != errBufferFull { t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) } diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go deleted 
file mode 100644 index 72a04f34919b..000000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,186 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
-func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - bp.mu.Unlock() - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe_test.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe_test.go deleted file mode 100644 index 300fb5f6d52d..000000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package ioutils - -import ( - "crypto/sha1" - "encoding/hex" - "math/rand" - "testing" - "time" -) - -func TestBytesPipeRead(t *testing.T) { - buf := NewBytesPipe() - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - rd := make([]byte, 4) - n, err := buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "1234" { - t.Fatalf("Read %s, but must be %s", rd, "1234") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "5678" { - t.Fatalf("Read %s, but must be %s", rd, "5679") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 2 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) - } - if string(rd[:n]) != "90" { - t.Fatalf("Read %s, but must be %s", rd, "90") - } -} - -func TestBytesPipeWrite(t *testing.T) { - buf := NewBytesPipe() - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - if buf.buf[0].String() != "1234567890" { - t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890") - } -} - -// Write and read in different speeds/chunk sizes and check valid data is read. 
-func TestBytesPipeWriteRandomChunks(t *testing.T) { - cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ - {100, 10, 1}, - {1000, 10, 5}, - {1000, 100, 0}, - {1000, 5, 6}, - {10000, 50, 25}, - } - - testMessage := []byte("this is a random string for testing") - // random slice sizes to read and write - writeChunks := []int{25, 35, 15, 20} - readChunks := []int{5, 45, 20, 25} - - for _, c := range cases { - // first pass: write directly to hash - hash := sha1.New() - for i := 0; i < c.iterations*c.writesPerLoop; i++ { - if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { - t.Fatal(err) - } - } - expected := hex.EncodeToString(hash.Sum(nil)) - - // write/read through buffer - buf := NewBytesPipe() - hash.Reset() - - done := make(chan struct{}) - - go func() { - // random delay before read starts - <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) - for i := 0; ; i++ { - p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) - n, _ := buf.Read(p) - if n == 0 { - break - } - hash.Write(p[:n]) - } - - close(done) - }() - - for i := 0; i < c.iterations; i++ { - for w := 0; w < c.writesPerLoop; w++ { - buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) - } - } - buf.Close() - <-done - - actual := hex.EncodeToString(hash.Sum(nil)) - - if expected != actual { - t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) - } - - } -} - -func BenchmarkBytesPipeWrite(b *testing.B) { - testData := []byte("pretty short line, because why not?") - for i := 0; i < b.N; i++ { - readBuf := make([]byte, 1024) - buf := NewBytesPipe() - go func() { - var err error - for err == nil { - _, err = buf.Read(readBuf) - } - }() - for j := 0; j < 1000; j++ { - buf.Write(testData) - } - buf.Close() - } -} - -func BenchmarkBytesPipeRead(b *testing.B) { - rd := make([]byte, 512) - for i := 0; i < b.N; i++ { - b.StopTimer() - buf := NewBytesPipe() - for j := 0; j < 500; j++ { - buf.Write(make([]byte, 1024)) - } - b.StartTimer() - for j := 0; j < 1000; j++ { - if n, _ := buf.Read(rd); n != 512 { - b.Fatalf("Wrong number of bytes: %d", n) - } - } - } -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fmt.go b/vendor/github.com/containers/storage/pkg/ioutils/fmt.go new file mode 100644 index 000000000000..0b04b0ba3e63 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fmt_test.go b/vendor/github.com/containers/storage/pkg/ioutils/fmt_test.go new file mode 100644 index 000000000000..8968863296da --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, _ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 
0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n) + } +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go index a56c4626515e..6dc50a03dc04 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -80,83 +80,3 @@ func (w *atomicFileWriter) Close() (retErr error) { } return nil } - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. -type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. 
-func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_test.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_test.go index 5d286005d20d..470ca1a6f4ad 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_test.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_test.go @@ -5,21 +5,9 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" "testing" ) -var ( - testMode os.FileMode = 0640 -) - -func init() { - // Windows does not support full Linux file mode - if runtime.GOOS == "windows" { - testMode = 0666 - } -} - func TestAtomicWriteToFile(t *testing.T) { tmpDir, err := ioutil.TempDir("", "atomic-writers-test") if err != nil { @@ -28,7 +16,7 @@ func TestAtomicWriteToFile(t *testing.T) { defer os.RemoveAll(tmpDir) expected := []byte("barbaz") - if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, testMode); err != nil { + if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, 0666); err != nil { t.Fatalf("Error writing to file: %v", err) } @@ -37,7 +25,7 @@ func TestAtomicWriteToFile(t *testing.T) { t.Fatalf("Error reading from file: %v", err) } - if !bytes.Equal(actual, expected) { + if bytes.Compare(actual, expected) != 0 { t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) } @@ -45,88 +33,7 @@ func TestAtomicWriteToFile(t *testing.T) { if err != nil { t.Fatalf("Error statting file: %v", err) } - if expected := os.FileMode(testMode); st.Mode() != expected { + if expected := os.FileMode(0666); st.Mode() != expected { t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) } } - -func TestAtomicWriteSetCommit(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") - if err != nil { - t.Fatalf("Error when creating temporary directory: %s", err) - } - defer os.RemoveAll(tmpDir) - - if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { - t.Fatalf("Error creating tmp directory: %s", err) - } - - targetDir := filepath.Join(tmpDir, "target") - ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) - if err != nil { - t.Fatalf("Error creating atomic write set: %s", err) - } - - expected := []byte("barbaz") - if err := ws.WriteFile("foo", expected, testMode); err != nil { - t.Fatalf("Error writing to file: %v", err) - } - - if _, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")); err == nil { - t.Fatalf("Expected error reading file where should not exist") - } - - if err := ws.Commit(targetDir); err != nil { - t.Fatalf("Error committing file: %s", err) - } - - actual, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")) - if err != nil { - t.Fatalf("Error reading from file: %v", err) - } - - if !bytes.Equal(actual, expected) { - t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) - } - - st, err := os.Stat(filepath.Join(targetDir, "foo")) - if err != nil { - t.Fatalf("Error statting file: %v", err) - } - if expected := os.FileMode(testMode); st.Mode() != expected { - t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) - } - -} - -func TestAtomicWriteSetCancel(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") - if err != nil { - t.Fatalf("Error when creating temporary directory: %s", err) - } - defer os.RemoveAll(tmpDir) - - if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { - t.Fatalf("Error creating tmp directory: %s", err) - } - - ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) - if 
err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + var rdr io.ReadSeeker + var rdrOffset int64 + + // iterate with a distinct loop variable so assignments to rdr (the value returned below) take effect + for i, cur := range r.readers { + offsetTo, err := r.getOffsetToReader(cur) + if err != nil { + return nil, -1, err + } + if offsetTo > offset { + rdr = r.readers[i-1] + rdrOffset = offsetTo - offset + break + } + + if cur == r.readers[len(r.readers)-1] { + rdr = cur + rdrOffset = offsetTo + offset + break + } + } + + return rdr, rdrOffset, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting 
seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bCap := int64(cap(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bCap) + if err != nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. 
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/multireader_test.go b/vendor/github.com/containers/storage/pkg/ioutils/multireader_test.go new file mode 100644 index 000000000000..de495b56da44 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/multireader_test.go @@ -0,0 +1,149 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) 
!= expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go index 63f3c07f4637..5a61e6bd4dcd 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/readers.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go @@ -4,8 +4,6 @@ import ( "crypto/sha256" "encoding/hex" "io" - - "golang.org/x/net/context" ) type readCloserWrapper struct { @@ -83,72 +81,3 @@ func (r *OnEOFReader) runFunc() { r.Fn = nil } } - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. 
-func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers_test.go b/vendor/github.com/containers/storage/pkg/ioutils/readers_test.go index 86e50d38f9ad..3ccfdf93031a 100644 --- a/vendor/github.com/containers/storage/pkg/ioutils/readers_test.go +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers_test.go @@ -2,13 +2,8 @@ package ioutils import ( "fmt" - "io/ioutil" "strings" "testing" - "time" - - "github.com/stretchr/testify/assert" - "golang.org/x/net/context" ) // Implement io.Reader @@ -36,7 +31,9 @@ func TestReaderErrWrapperReadOnError(t *testing.T) { called = true }) _, err := wrapper.Read([]byte{}) - assert.EqualError(t, err, "error reader always fail") + if err == nil || !strings.Contains(err.Error(), "error reader always fail") { + t.Fatalf("readErrWrapper should have returned an error") + } if !called { t.Fatalf("readErrWrapper should have called the anonymous function on failure") } @@ -77,17 +74,3 @@ func (p *perpetualReader) Read(buf []byte) (n int, err error) { } return len(buf), nil } - -func TestCancelReadCloser(t *testing.T) { - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) - for { - var buf [128]byte - _, err := cancelReadCloser.Read(buf[:]) - if err == context.DeadlineExceeded { - break - } else if err != nil { - t.Fatalf("got unexpected error: %v", err) - } - } -} diff --git a/vendor/github.com/containers/storage/pkg/locker/README.md b/vendor/github.com/containers/storage/pkg/locker/README.md deleted file mode 100644 index ad15e89af101..000000000000 --- a/vendor/github.com/containers/storage/pkg/locker/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Locker -===== - -locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however, the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. - - -## Usage - -```go -package important - -import ( - "sync" - "time" - - "github.com/containers/storage/pkg/locker" -) - -type important struct { - locks *locker.Locker - data map[string]interface{} - mu sync.Mutex -} - -func (i *important) Get(name string) interface{} { - i.locks.Lock(name) - defer i.locks.Unlock(name) - return data[name] -} - -func (i *important) Create(name string, data interface{}) { - i.locks.Lock(name) - defer i.locks.Unlock(name) - - i.createImportant(data) - - s.mu.Lock() - i.data[name] = data - s.mu.Unlock() -} - -func (i *important) createImportant(data interface{}) { - time.Sleep(10 * time.Second) -} -``` - -For functions dealing with a given name, always lock at the beginning of the -function (or before doing anything with the underlying state), this ensures any -other function that is dealing with the same name will block. - -When needing to modify the underlying data, use the global lock to ensure nothing -else is modifying it at the same time. -Since name lock is already in place, no reads will occur while the modification -is being performed. 
- diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/containers/storage/pkg/locker/locker.go deleted file mode 100644 index 0b22ddfab85c..000000000000 --- a/vendor/github.com/containers/storage/pkg/locker/locker.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. -*/ -package locker - -import ( - "errors" - "sync" - "sync/atomic" -) - -// ErrNoSuchLock is returned when the requested lock does not exist -var ErrNoSuchLock = errors.New("no such lock") - -// Locker provides a locking mechanism based on the passed in reference name -type Locker struct { - mu sync.Mutex - locks map[string]*lockCtr -} - -// lockCtr is used by Locker to represent a lock with a given name. -type lockCtr struct { - mu sync.Mutex - // waiters is the number of waiters waiting to acquire the lock - // this is int32 instead of uint32 so we can add `-1` in `dec()` - waiters int32 -} - -// inc increments the number of waiters waiting for the lock -func (l *lockCtr) inc() { - atomic.AddInt32(&l.waiters, 1) -} - -// dec decrements the number of waiters waiting on the lock -func (l *lockCtr) dec() { - atomic.AddInt32(&l.waiters, -1) -} - -// count gets the current number of waiters -func (l *lockCtr) count() int32 { - return atomic.LoadInt32(&l.waiters) -} - -// Lock locks the mutex -func (l *lockCtr) Lock() { - l.mu.Lock() -} - -// Unlock unlocks the mutex -func (l *lockCtr) Unlock() { - l.mu.Unlock() -} - -// New creates a new Locker -func New() *Locker { - return &Locker{ - locks: make(map[string]*lockCtr), - } -} - -// Lock locks a mutex with the given name. 
If it doesn't exist, one is created -func (l *Locker) Lock(name string) { - l.mu.Lock() - if l.locks == nil { - l.locks = make(map[string]*lockCtr) - } - - nameLock, exists := l.locks[name] - if !exists { - nameLock = &lockCtr{} - l.locks[name] = nameLock - } - - // increment the nameLock waiters while inside the main mutex - // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently - nameLock.inc() - l.mu.Unlock() - - // Lock the nameLock outside the main mutex so we don't block other operations - // once locked then we can decrement the number of waiters for this lock - nameLock.Lock() - nameLock.dec() -} - -// Unlock unlocks the mutex with the given name -// If the given lock is not being waited on by any other callers, it is deleted -func (l *Locker) Unlock(name string) error { - l.mu.Lock() - nameLock, exists := l.locks[name] - if !exists { - l.mu.Unlock() - return ErrNoSuchLock - } - - if nameLock.count() == 0 { - delete(l.locks, name) - } - nameLock.Unlock() - - l.mu.Unlock() - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/locker/locker_test.go b/vendor/github.com/containers/storage/pkg/locker/locker_test.go deleted file mode 100644 index 5a297dd47b6f..000000000000 --- a/vendor/github.com/containers/storage/pkg/locker/locker_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package locker - -import ( - "sync" - "testing" - "time" -) - -func TestLockCounter(t *testing.T) { - l := &lockCtr{} - l.inc() - - if l.waiters != 1 { - t.Fatal("counter inc failed") - } - - l.dec() - if l.waiters != 0 { - t.Fatal("counter dec failed") - } -} - -func TestLockerLock(t *testing.T) { - l := New() - l.Lock("test") - ctr := l.locks["test"] - - if ctr.count() != 0 { - t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) - } - - chDone := make(chan struct{}) - go func() { - l.Lock("test") - close(chDone) - }() - - chWaiting := make(chan struct{}) - go func() { - for range time.Tick(1 * time.Millisecond) { - if ctr.count() == 1 { - close(chWaiting) - break - } - } - }() - - select { - case <-chWaiting: - case <-time.After(3 * time.Second): - t.Fatal("timed out waiting for lock waiters to be incremented") - } - - select { - case <-chDone: - t.Fatal("lock should not have returned while it was still held") - default: - } - - if err := l.Unlock("test"); err != nil { - t.Fatal(err) - } - - select { - case <-chDone: - case <-time.After(3 * time.Second): - t.Fatalf("lock should have completed") - } - - if ctr.count() != 0 { - t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) - } -} - -func TestLockerUnlock(t *testing.T) { - l := New() - - l.Lock("test") - l.Unlock("test") - - chDone := make(chan struct{}) - go func() { - l.Lock("test") - close(chDone) - }() - - select { - case <-chDone: - case <-time.After(3 * time.Second): - t.Fatalf("lock should not be blocked") - } -} - -func TestLockerConcurrency(t *testing.T) { - l := New() - - var wg sync.WaitGroup - for i := 0; i <= 10000; i++ { - wg.Add(1) - go func() { - l.Lock("test") - // if there is a concurrency issue, will very likely panic here - l.Unlock("test") - wg.Done() - }() - } - - chDone := make(chan struct{}) - go func() { - wg.Wait() - close(chDone) - }() - - select { - case <-chDone: - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for locks to complete") - } - - // Since everything has unlocked this should not exist anymore - if ctr, exists := l.locks["test"]; exists { - t.Fatalf("lock should not exist: %v", ctr) - } -} diff --git 
a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go index 5f76f331b63d..f166cb2f7786 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -45,5 +45,4 @@ const ( RELATIME = 0 REMOUNT = 0 STRICTATIME = 0 - mntDetach = 0 ) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go index 0425d0dd633c..dc696dce9075 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -1,87 +1,85 @@ package mount import ( - "golang.org/x/sys/unix" + "syscall" ) const ( // RDONLY will mount the file system read-only. - RDONLY = unix.MS_RDONLY + RDONLY = syscall.MS_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. - NOSUID = unix.MS_NOSUID + NOSUID = syscall.MS_NOSUID // NODEV will not interpret character or block special devices on the file // system. - NODEV = unix.MS_NODEV + NODEV = syscall.MS_NODEV // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MS_NOEXEC + NOEXEC = syscall.MS_NOEXEC // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = unix.MS_SYNCHRONOUS + SYNCHRONOUS = syscall.MS_SYNCHRONOUS // DIRSYNC will force all directory updates within the file system to be done // synchronously. This affects the following system calls: create, link, // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = unix.MS_DIRSYNC + DIRSYNC = syscall.MS_DIRSYNC // REMOUNT will attempt to remount an already-mounted file system. This is // commonly used to change the mount flags for a file system, especially to // make a readonly file system writeable. It does not change device or mount // point. - REMOUNT = unix.MS_REMOUNT + REMOUNT = syscall.MS_REMOUNT // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = unix.MS_MANDLOCK + MANDLOCK = syscall.MS_MANDLOCK // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MS_NOATIME + NOATIME = syscall.MS_NOATIME // NODIRATIME will not update the directory access time. - NODIRATIME = unix.MS_NODIRATIME + NODIRATIME = syscall.MS_NODIRATIME // BIND remounts a subtree somewhere else. - BIND = unix.MS_BIND + BIND = syscall.MS_BIND // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = unix.MS_BIND | unix.MS_REC + RBIND = syscall.MS_BIND | syscall.MS_REC // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = unix.MS_UNBINDABLE + UNBINDABLE = syscall.MS_UNBINDABLE // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = unix.MS_PRIVATE + PRIVATE = syscall.MS_PRIVATE // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC // SLAVE creates a mount which receives propagation from its master, but not // vice versa. - SLAVE = unix.MS_SLAVE + SLAVE = syscall.MS_SLAVE // RSLAVE marks the entire mount tree as SLAVE. 
- RSLAVE = unix.MS_SLAVE | unix.MS_REC + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC // SHARED creates a mount which provides the ability to create mirrors of // that mount such that mounts and unmounts within any of the mirrors // propagate to the other mirrors. - SHARED = unix.MS_SHARED + SHARED = syscall.MS_SHARED // RSHARED marks the entire mount tree as SHARED. - RSHARED = unix.MS_SHARED | unix.MS_REC + RSHARED = syscall.MS_SHARED | syscall.MS_REC // RELATIME updates inode access times relative to modify or change time. - RELATIME = unix.MS_RELATIME + RELATIME = syscall.MS_RELATIME // STRICTATIME allows to explicitly request full atime updates. This makes // it possible for the kernel to default to relatime or noatime but still // allow userspace to override it. - STRICTATIME = unix.MS_STRICTATIME - - mntDetach = unix.MNT_DETACH + STRICTATIME = syscall.MS_STRICTATIME ) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index 9ed741e3ff5b..5564f7b3cdea 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -27,5 +27,4 @@ const ( STRICTATIME = 0 SYNCHRONOUS = 0 RDONLY = 0 - mntDetach = 0 ) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go index d3caa16bda2a..66ac4bf4723e 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mount.go +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -1,11 +1,7 @@ package mount import ( - "sort" - "strings" "time" - - "github.com/containers/storage/pkg/fileutils" ) // GetMounts retrieves a list of mounts for the current running process. @@ -21,10 +17,6 @@ func Mounted(mountpoint string) (bool, error) { return false, err } - mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint) - if err != nil { - return false, err - } // Search the table for the mountpoint for _, e := range entries { if e.Mountpoint == mountpoint { @@ -54,11 +46,13 @@ func Mount(device, target, mType, options string) error { // flags.go for supported option flags. func ForceMount(device, target, mType, options string) error { flag, data := parseOptions(options) - return mount(device, target, mType, uintptr(flag), data) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil } -// Unmount lazily unmounts a filesystem on supported platforms, otherwise -// does a normal unmount. +// Unmount will unmount the target filesystem, so long as it is mounted. func Unmount(target string) error { if mounted, err := Mounted(target); err != nil || !mounted { return err @@ -66,32 +60,6 @@ func Unmount(target string) error { return ForceUnmount(target) } -// RecursiveUnmount unmounts the target and all mounts underneath, starting with -// the deepsest mount first. 
-func RecursiveUnmount(target string) error { - mounts, err := GetMounts() - if err != nil { - return err - } - - // Make the deepest mount be first - sort.Sort(sort.Reverse(byMountpoint(mounts))) - - for i, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, target) { - continue - } - if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { - if mounted, err := Mounted(m.Mountpoint); err != nil || mounted { - return err - } - // Ignore errors for submounts and continue trying to unmount others - // The final unmount should fail if there ane any submounts remaining - } - } - return nil -} - // ForceUnmount will force an unmount of the target filesystem, regardless if // it is mounted or not. func ForceUnmount(target string) (err error) { @@ -102,5 +70,5 @@ func ForceUnmount(target string) (err error) { } time.Sleep(100 * time.Millisecond) } - return nil + return } diff --git a/vendor/github.com/containers/storage/pkg/mount/mount_unix_test.go b/vendor/github.com/containers/storage/pkg/mount/mount_unix_test.go index 253aff3b8e78..90fa348b2270 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mount_unix_test.go +++ b/vendor/github.com/containers/storage/pkg/mount/mount_unix_test.go @@ -1,4 +1,4 @@ -// +build !windows,!solaris +// +build !windows package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index 814896cc9e6b..bb870e6f59b9 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -13,9 +13,8 @@ import "C" import ( "fmt" "strings" + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) func allocateIOVecs(options []string) []C.struct_iovec { @@ -56,5 +55,5 @@ func mount(device, target, mType string, flag uintptr, data string) error { } func unmount(target string, flag int) error { - return unix.Unmount(target, flag) + return syscall.Unmount(target, flag) } diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go index 39c36d472a93..dd4280c77786 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go @@ -1,57 +1,21 @@ package mount import ( - "golang.org/x/sys/unix" + "syscall" ) -const ( - // ptypes is the set propagation types. - ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE - - // pflags is the full set valid flags for a change propagation call. - pflags = ptypes | unix.MS_REC | unix.MS_SILENT - - // broflags is the combination of bind and read only - broflags = unix.MS_BIND | unix.MS_RDONLY -) - -// isremount returns true if either device name or flags identify a remount request, false otherwise. -func isremount(device string, flags uintptr) bool { - switch { - // We treat device "" and "none" as a remount request to provide compatibility with - // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. 
- case flags&unix.MS_REMOUNT != 0, device == "", device == "none": - return true - default: - return false - } -} - -func mount(device, target, mType string, flags uintptr, data string) error { - oflags := flags &^ ptypes - if !isremount(device, flags) || data != "" { - // Initial call applying all non-propagation flags for mount - // or remount with changed data - if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return err - } +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err } - if flags&ptypes != 0 { - // Change the propagation type. - if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return err - } + // If we have a bind mount or remount, remount... + if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) } - - if oflags&broflags == broflags { - // Remount the bind to apply read only. - return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") - } - return nil } func unmount(target string, flag int) error { - return unix.Unmount(target, flag) + return syscall.Unmount(target, flag) } diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux_test.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux_test.go deleted file mode 100644 index 47c03b363146..000000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_linux_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// +build linux - -package mount - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "testing" -) - -func TestMount(t *testing.T) { - if os.Getuid() != 0 { - t.Skip("not root tests would fail") - } - - source, err := ioutil.TempDir("", "mount-test-source-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(source) - - // Ensure we have a known start point by mounting tmpfs with given options - if err := Mount("tmpfs", source, "tmpfs", "private"); err != nil { - t.Fatal(err) - } - defer ensureUnmount(t, source) - validateMount(t, source, "", "", "") - if t.Failed() { - t.FailNow() - } - - target, err := ioutil.TempDir("", "mount-test-target-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(target) - - tests := []struct { - source string - ftype string - options string - expectedOpts string - expectedOptional string - expectedVFS string - }{ - // No options - {"tmpfs", "tmpfs", "", "", "", ""}, - // Default rw / ro test - {source, "", "bind", "", "", ""}, - {source, "", "bind,private", "", "", ""}, - {source, "", "bind,shared", "", "shared", ""}, - {source, "", "bind,slave", "", "master", ""}, - {source, "", "bind,unbindable", "", "unbindable", ""}, - // Read Write tests - {source, "", "bind,rw", "rw", "", ""}, - {source, "", "bind,rw,private", "rw", "", ""}, - {source, "", "bind,rw,shared", "rw", "shared", ""}, - {source, "", "bind,rw,slave", "rw", "master", ""}, - {source, "", "bind,rw,unbindable", "rw", "unbindable", ""}, - // Read Only tests - {source, "", "bind,ro", "ro", "", ""}, - {source, "", "bind,ro,private", "ro", "", ""}, - {source, "", "bind,ro,shared", "ro", "shared", ""}, - {source, "", "bind,ro,slave", "ro", "master", ""}, - {source, "", "bind,ro,unbindable", "ro", "unbindable", ""}, - // Remount tests to change per filesystem options - {"", "", "remount,size=128k", "rw", "", "rw,size=128k"}, - {"", "", "remount,ro,size=128k", "ro", "", "ro,size=128k"}, - } - - for _, tc := range tests 
{ - ftype, options := tc.ftype, tc.options - if tc.ftype == "" { - ftype = "none" - } - if tc.options == "" { - options = "none" - } - - t.Run(fmt.Sprintf("%v-%v", ftype, options), func(t *testing.T) { - if strings.Contains(tc.options, "slave") { - // Slave requires a shared source - if err := MakeShared(source); err != nil { - t.Fatal(err) - } - defer func() { - if err := MakePrivate(source); err != nil { - t.Fatal(err) - } - }() - } - if strings.Contains(tc.options, "remount") { - // create a new mount to remount first - if err := Mount("tmpfs", target, "tmpfs", ""); err != nil { - t.Fatal(err) - } - } - if err := Mount(tc.source, target, tc.ftype, tc.options); err != nil { - t.Fatal(err) - } - defer ensureUnmount(t, target) - validateMount(t, target, tc.expectedOpts, tc.expectedOptional, tc.expectedVFS) - }) - } -} - -// ensureUnmount umounts mnt checking for errors -func ensureUnmount(t *testing.T, mnt string) { - if err := Unmount(mnt); err != nil { - t.Error(err) - } -} - -// validateMount checks that mnt has the given options -func validateMount(t *testing.T, mnt string, opts, optional, vfs string) { - info, err := GetMounts() - if err != nil { - t.Fatal(err) - } - - wantedOpts := make(map[string]struct{}) - if opts != "" { - for _, opt := range strings.Split(opts, ",") { - wantedOpts[opt] = struct{}{} - } - } - - wantedOptional := make(map[string]struct{}) - if optional != "" { - for _, opt := range strings.Split(optional, ",") { - wantedOptional[opt] = struct{}{} - } - } - - wantedVFS := make(map[string]struct{}) - if vfs != "" { - for _, opt := range strings.Split(vfs, ",") { - wantedVFS[opt] = struct{}{} - } - } - - mnts := make(map[int]*Info, len(info)) - for _, mi := range info { - mnts[mi.ID] = mi - } - - for _, mi := range info { - if mi.Mountpoint != mnt { - continue - } - - // Use parent info as the defaults - p := mnts[mi.Parent] - pOpts := make(map[string]struct{}) - if p.Opts != "" { - for _, opt := range strings.Split(p.Opts, ",") { - pOpts[clean(opt)] = struct{}{} - } - } - pOptional := make(map[string]struct{}) - if p.Optional != "" { - for _, field := range strings.Split(p.Optional, ",") { - pOptional[clean(field)] = struct{}{} - } - } - - // Validate Opts - if mi.Opts != "" { - for _, opt := range strings.Split(mi.Opts, ",") { - opt = clean(opt) - if !has(wantedOpts, opt) && !has(pOpts, opt) { - t.Errorf("unexpected mount option %q expected %q", opt, opts) - } - delete(wantedOpts, opt) - } - } - for opt := range wantedOpts { - t.Errorf("missing mount option %q found %q", opt, mi.Opts) - } - - // Validate Optional - if mi.Optional != "" { - for _, field := range strings.Split(mi.Optional, ",") { - field = clean(field) - if !has(wantedOptional, field) && !has(pOptional, field) { - t.Errorf("unexpected optional failed %q expected %q", field, optional) - } - delete(wantedOptional, field) - } - } - for field := range wantedOptional { - t.Errorf("missing optional field %q found %q", field, mi.Optional) - } - - // Validate VFS if set - if vfs != "" { - if mi.VfsOpts != "" { - for _, opt := range strings.Split(mi.VfsOpts, ",") { - opt = clean(opt) - if !has(wantedVFS, opt) { - t.Errorf("unexpected mount option %q expected %q", opt, vfs) - } - delete(wantedVFS, opt) - } - } - for opt := range wantedVFS { - t.Errorf("missing mount option %q found %q", opt, mi.VfsOpts) - } - } - - return - } - - t.Errorf("failed to find mount %q", mnt) -} - -// clean strips off any value param after the colon -func clean(v string) string { - return strings.SplitN(v, ":", 2)[0] -} - -// has 
returns true if key is a member of m -func has(m map[string]struct{}, key string) bool { - _, ok := m[key] - return ok -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go index 48b86771e7be..c684aa81fcc1 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go @@ -3,9 +3,8 @@ package mount import ( - "unsafe" - "golang.org/x/sys/unix" + "unsafe" ) // #include diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go index ff4cc1d86b6d..e3fc3535e934 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go @@ -38,17 +38,3 @@ type Info struct { // VfsOpts represents per super block options. VfsOpts string } - -type byMountpoint []*Info - -func (by byMountpoint) Len() int { - return len(by) -} - -func (by byMountpoint) Less(i, j int) bool { - return by[i].Mountpoint < by[j].Mountpoint -} - -func (by byMountpoint) Swap(i, j int) { - by[i], by[j] = by[j], by[i] -} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux_test.go index f25ab19feddb..c1837942e3a9 100644 --- a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux_test.go +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux_test.go @@ -5,9 +5,8 @@ package mount import ( "os" "path" + "syscall" "testing" - - "golang.org/x/sys/unix" ) // nothing is propagated in or out @@ -310,7 +309,7 @@ func TestSubtreeUnbindable(t *testing.T) { }() // then attempt to mount it to target. It should fail - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != unix.EINVAL { + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not have been bindable", sourceDir) diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go deleted file mode 100644 index 09f6b03cbc0c..000000000000 --- a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build solaris - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. 
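These Make* helpers, whose Solaris stubs are deleted here, all reduce on Linux to a propagation-only mount call. A sketch under that assumption, using the syscall package this revision standardizes on (function names are illustrative; the package's public route goes through its Mount option strings):

package mountsketch

import "syscall"

// makeShared changes only the propagation type of an existing mount point;
// no device, filesystem type, or mount data is involved.
func makeShared(mountPoint string) error {
	return syscall.Mount("", mountPoint, "none", syscall.MS_SHARED, "")
}

// makeRShared does the same recursively for the whole subtree rooted at
// mountPoint by adding MS_REC.
func makeRShared(mountPoint string) error {
	return syscall.Mount("", mountPoint, "none", syscall.MS_SHARED|syscall.MS_REC, "")
}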
-func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - // TODO: Solaris does not support bind mounts. - // Evaluate lofs and also look at the relevant - // mount flags to be supported. - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go index 76e1e499f37e..54a89d28c660 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd solaris openbsd +// +build linux freebsd solaris // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. @@ -6,8 +6,6 @@ package kernel import ( "bytes" - - "github.com/sirupsen/logrus" ) // GetKernelVersion gets the current kernel version. @@ -30,16 +28,3 @@ func GetKernelVersion() (*VersionInfo, error) { return ParseRelease(string(release)) } - -// CheckKernelVersion checks if current kernel is newer than (or equal to) -// the given version. -func CheckKernelVersion(k, major, minor int) bool { - if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) - } else { - if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { - return false - } - } - return true -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go index e59867277676..80fab8ff6424 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -4,9 +4,8 @@ package kernel import ( "fmt" + "syscall" "unsafe" - - "golang.org/x/sys/windows" ) // VersionInfo holds information about the kernel. 
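With CheckKernelVersion removed from kernel_unix.go above, callers needing a minimum-kernel gate have to compose it from GetKernelVersion and CompareKernelVersion themselves. A self-contained sketch of that check; the compare logic is simplified here, and the package's own CompareKernelVersion remains the reference:

package kernelsketch

// VersionInfo mirrors the kernel/major/minor fields the vendored package
// compares; the flavor string does not affect ordering.
type VersionInfo struct {
	Kernel, Major, Minor int
}

// compareKernelVersion orders two versions: negative when a is older than
// b, zero when equal, positive when newer.
func compareKernelVersion(a, b VersionInfo) int {
	if d := a.Kernel - b.Kernel; d != 0 {
		return d
	}
	if d := a.Major - b.Major; d != 0 {
		return d
	}
	return a.Minor - b.Minor
}

// atLeast is the boolean form the removed CheckKernelVersion provided:
// does the running kernel meet kernel.major.minor?
func atLeast(v VersionInfo, kernel, major, minor int) bool {
	return compareKernelVersion(v, VersionInfo{kernel, major, minor}) >= 0
}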
@@ -25,28 +24,28 @@ func (k *VersionInfo) String() string { func GetKernelVersion() (*VersionInfo, error) { var ( - h windows.Handle + h syscall.Handle dwVersion uint32 err error ) KVI := &VersionInfo{"Unknown", 0, 0, 0} - if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, - windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), 0, - windows.KEY_READ, + syscall.KEY_READ, &h); err != nil { return KVI, err } - defer windows.RegCloseKey(h) + defer syscall.RegCloseKey(h) var buf [1 << 10]uint16 var typ uint32 n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - if err = windows.RegQueryValueEx(h, - windows.StringToUTF16Ptr("BuildLabEx"), + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), nil, &typ, (*byte)(unsafe.Pointer(&buf[0])), @@ -54,11 +53,11 @@ func GetKernelVersion() (*VersionInfo, error) { return KVI, err } - KVI.kvi = windows.UTF16ToString(buf[:]) + KVI.kvi = syscall.UTF16ToString(buf[:]) // Important - docker.exe MUST be manifested for this API to return // the correct information. - if dwVersion, err = windows.GetVersion(); err != nil { + if dwVersion, err = syscall.GetVersion(); err != nil { return KVI, err } diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go index e913fad0013c..bb9b32641e80 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go @@ -1,16 +1,18 @@ package kernel -import "golang.org/x/sys/unix" +import ( + "syscall" +) // Utsname represents the system name structure. -// It is passthrough for unix.Utsname in order to make it portable with +// It is passthrough for syscall.Utsname in order to make it portable with // other platforms where it is not available. -type Utsname unix.Utsname +type Utsname syscall.Utsname -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} - if err := unix.Uname(uts); err != nil { + if err := syscall.Uname(uts); err != nil { return nil, err } return uts, nil diff --git a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_linux.go deleted file mode 100644 index e04a3499af9f..000000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_linux.go +++ /dev/null @@ -1,77 +0,0 @@ -// Package operatingsystem provides helper function to get the operating system -// name for different platforms. -package operatingsystem - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/mattn/go-shellwords" -) - -var ( - // file to use to detect if the daemon is running in a container - proc1Cgroup = "/proc/1/cgroup" - - // file to check to determine Operating System - etcOsRelease = "/etc/os-release" - - // used by stateless systems like Clear Linux - altOsRelease = "/usr/lib/os-release" -) - -// GetOperatingSystem gets the name of the current operating system. 
-func GetOperatingSystem() (string, error) { - osReleaseFile, err := os.Open(etcOsRelease) - if err != nil { - if !os.IsNotExist(err) { - return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) - } - osReleaseFile, err = os.Open(altOsRelease) - if err != nil { - return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) - } - } - defer osReleaseFile.Close() - - var prettyName string - scanner := bufio.NewScanner(osReleaseFile) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "PRETTY_NAME=") { - data := strings.SplitN(line, "=", 2) - prettyNames, err := shellwords.Parse(data[1]) - if err != nil { - return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) - } - if len(prettyNames) != 1 { - return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) - } - prettyName = prettyNames[0] - } - } - if prettyName != "" { - return prettyName, nil - } - // If not set, defaults to PRETTY_NAME="Linux" - // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html - return "Linux", nil -} - -// IsContainerized returns true if we are running inside a container. -func IsContainerized() (bool, error) { - b, err := ioutil.ReadFile(proc1Cgroup) - if err != nil { - return false, err - } - for _, line := range bytes.Split(b, []byte{'\n'}) { - if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { - return true, nil - } - } - return false, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_solaris.go deleted file mode 100644 index d08ad1486042..000000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package operatingsystem - -/* -#include -*/ -import "C" - -import ( - "bytes" - "errors" - "io/ioutil" -) - -var etcOsRelease = "/etc/release" - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - b, err := ioutil.ReadFile(etcOsRelease) - if err != nil { - return "", err - } - if i := bytes.Index(b, []byte("\n")); i >= 0 { - b = bytes.Trim(b[:i], " ") - return string(b), nil - } - return "", errors.New("release not found") -} - -// IsContainerized returns true if we are running inside a container. -func IsContainerized() (bool, error) { - if C.getzoneid() != 0 { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_unix.go deleted file mode 100644 index bc91c3c53375..000000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build freebsd darwin - -package operatingsystem - -import ( - "errors" - "os/exec" -) - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - cmd := exec.Command("uname", "-s") - osName, err := cmd.Output() - if err != nil { - return "", err - } - return string(osName), nil -} - -// IsContainerized returns true if we are running inside a container. -// No-op on FreeBSD and Darwin, always returns false. 
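The deleted Linux GetOperatingSystem above is essentially a PRETTY_NAME scan over /etc/os-release, with /usr/lib/os-release as fallback and "Linux" as the documented default. A simplified sketch; quote handling here is naive, whereas the removed code used go-shellwords for full quoting rules:

package ossketch

import (
	"bufio"
	"os"
	"strings"
)

// prettyName scans an os-release style file for PRETTY_NAME and falls back
// to the default "Linux" when the field is absent.
func prettyName(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	name := "Linux"
	s := bufio.NewScanner(f)
	for s.Scan() {
		if line := s.Text(); strings.HasPrefix(line, "PRETTY_NAME=") {
			name = strings.Trim(strings.TrimPrefix(line, "PRETTY_NAME="), `"'`)
		}
	}
	return name, s.Err()
}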
-func IsContainerized() (bool, error) { - // TODO: Implement jail detection for freeBSD - return false, errors.New("Cannot detect if we are in container") -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_unix_test.go deleted file mode 100644 index e7120c65c47b..000000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_unix_test.go +++ /dev/null @@ -1,247 +0,0 @@ -// +build linux freebsd - -package operatingsystem - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestGetOperatingSystem(t *testing.T) { - var backup = etcOsRelease - - invalids := []struct { - content string - errorExpected string - }{ - { - `PRETTY_NAME=Source Mage GNU/Linux -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", - }, - { - `PRETTY_NAME="Ubuntu Linux -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME is invalid: invalid command line string", - }, - { - `PRETTY_NAME=Ubuntu' -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME is invalid: invalid command line string", - }, - { - `PRETTY_NAME' -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", - }, - } - - valids := []struct { - content string - expected string - }{ - { - `NAME="Ubuntu" -PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, - "Linux", - }, - { - `NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, - "Linux", - }, - { - `NAME=Gentoo -ID=gentoo -PRETTY_NAME="Gentoo/Linux" -ANSI_COLOR="1;32" -HOME_URL="http://www.gentoo.org/" -SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" -BUG_REPORT_URL="https://bugs.gentoo.org/" -`, - "Gentoo/Linux", - }, - { - `NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu 14.04 LTS" -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, - "Ubuntu 14.04 LTS", - }, - { - `NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME='Ubuntu 14.04 LTS'`, - "Ubuntu 14.04 LTS", - }, - { - `PRETTY_NAME=Source -NAME="Source Mage"`, - "Source", - }, - { - `PRETTY_NAME=Source -PRETTY_NAME="Source Mage"`, - "Source Mage", - }, - } - - dir := os.TempDir() - etcOsRelease = filepath.Join(dir, "etcOsRelease") - - defer func() { - os.Remove(etcOsRelease) - etcOsRelease = backup - }() - - for _, elt := range invalids { - if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { - t.Fatalf("failed to write to %s: %v", etcOsRelease, err) - } - s, err := GetOperatingSystem() - if err == nil || err.Error() != elt.errorExpected { - t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) - } - } - - for _, elt := range valids { - if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { - t.Fatalf("failed to write to %s: %v", etcOsRelease, err) - } - s, err := GetOperatingSystem() - if err != nil || s != elt.expected { - 
t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err) - } - } -} - -func TestIsContainerized(t *testing.T) { - var ( - backup = proc1Cgroup - nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope -8:net_cls,net_prio:/ -7:cpuset:/ -6:freezer:/ -5:devices:/init.scope -4:blkio:/init.scope -3:cpu,cpuacct:/init.scope -2:perf_event:/ -1:name=systemd:/init.scope -`) - nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ -13:hugetlb:/ -12:net_prio:/ -11:perf_event:/ -10:bfqio:/ -9:blkio:/ -8:net_cls:/ -7:freezer:/ -6:devices:/ -5:memory:/ -4:cpuacct:/ -3:cpu:/ -2:cpuset:/ -`) - containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -7:net_cls:/ -6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -1:cpuset:/`) - ) - - dir := os.TempDir() - proc1Cgroup = filepath.Join(dir, "proc1Cgroup") - - defer func() { - os.Remove(proc1Cgroup) - proc1Cgroup = backup - }() - - if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err := IsContainerized() - if err != nil { - t.Fatal(err) - } - if inContainer { - t.Fatal("Wrongly assuming containerized") - } - - if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err = IsContainerized() - if err != nil { - t.Fatal(err) - } - if inContainer { - t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") - } - - if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err = IsContainerized() - if err != nil { - t.Fatal(err) - } - if !inContainer { - t.Fatal("Wrongly assuming non-containerized") - } -} - -func TestOsReleaseFallback(t *testing.T) { - var backup = etcOsRelease - var altBackup = altOsRelease - dir := os.TempDir() - etcOsRelease = filepath.Join(dir, "etcOsRelease") - altOsRelease = filepath.Join(dir, "altOsRelease") - - defer func() { - os.Remove(dir) - etcOsRelease = backup - altOsRelease = altBackup - }() - content := `NAME=Gentoo -ID=gentoo -PRETTY_NAME="Gentoo/Linux" -ANSI_COLOR="1;32" -HOME_URL="http://www.gentoo.org/" -SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" -BUG_REPORT_URL="https://bugs.gentoo.org/" -` - if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { - t.Fatalf("failed to write to %s: %v", etcOsRelease, err) - } - s, err := GetOperatingSystem() - if err != nil || s != "Gentoo/Linux" { - t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) - } -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_windows.go deleted file mode 100644 index 5d8b42cc3650..000000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/operatingsystem/operatingsystem_windows.go +++ 
/dev/null @@ -1,50 +0,0 @@ -package operatingsystem - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c -// for a similar sample - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - - var h windows.Handle - - // Default return value - ret := "Unknown Operating System" - - if err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, - windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - windows.KEY_READ, - &h); err != nil { - return ret, err - } - defer windows.RegCloseKey(h) - - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err := windows.RegQueryValueEx(h, - windows.StringToUTF16Ptr("ProductName"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { - return ret, err - } - ret = windows.UTF16ToString(buf[:]) - - return ret, nil -} - -// IsContainerized returns true if we are running inside a container. -// No-op on Windows, always returns false. -func IsContainerized() (bool, error) { - return false, nil -} diff --git a/vendor/github.com/containers/storage/pkg/plugins/client.go b/vendor/github.com/containers/storage/pkg/plugins/client.go new file mode 100644 index 000000000000..b4c31c0569c3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/client.go @@ -0,0 +1,188 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/containers/storage/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +const ( + defaultTimeOut = 30 +) + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + clientTransport := transport.NewHTTPTransport(tr, scheme, socket) + return NewClientWithTransport(clientTransport), nil +} + +// NewClientWithTransport creates a new plugin client with a given transport. +func NewClientWithTransport(tr transport.Transport) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. 
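As a usage sketch of the client added here: Call (defined just below) JSON-encodes the request body, retries transient connection failures for up to 30 seconds, and decodes the JSON reply. The socket path, RPC method name, and request/response types below are hypothetical, chosen only to exercise the shown signatures:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/plugins"
)

type createRequest struct{ Name string }
type createResponse struct{ Err string }

func main() {
	// nil TLS options yields a plain transport; the scheme is derived
	// from the address URL (unix sockets map to http).
	c, err := plugins.NewClient("unix:///run/containers/storage/plugins/example.sock", nil)
	if err != nil {
		panic(err)
	}
	var resp createResponse
	// "VolumeDriver.Create" is a hypothetical method name for illustration.
	if err := c.Call("VolumeDriver.Create", createRequest{Name: "data"}, &resp); err != nil {
		panic(err)
	}
	fmt.Println("plugin replied, Err =", resp.Err)
}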
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + var retries int + start := time.Now() + + for { + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/client_test.go b/vendor/github.com/containers/storage/pkg/plugins/client_test.go new file mode 100644 index 000000000000..56b3d710079d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/client_test.go @@ -0,0 +1,134 @@ +package plugins + +import ( + "io" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + "time" + + "github.com/containers/storage/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setupRemotePluginServer() string { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + return server.URL +} + +func teardownRemotePluginServer() { + if server != nil { + server.Close() + } +} + +func TestFailedConnection(t *testing.T) { + c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true}) + _, err := c.callWithRetry("Service.Method", nil, false) + if err == nil { + t.Fatal("Unexpected successful connection") + } +} + +func TestEchoInputOutput(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(output, m) { + t.Fatalf("Expected %v, was %v\n", m, output) + } + err = c.Call("Test.Echo", nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestBackoff(t *testing.T) { + cases := []struct { + retries int + expTimeOff time.Duration + }{ + {0, time.Duration(1)}, + {1, time.Duration(2)}, + {2, time.Duration(4)}, + {4, time.Duration(16)}, + {6, time.Duration(30)}, + {10, time.Duration(30)}, + } + + for _, c := range cases { + s := c.expTimeOff * time.Second + if d := backoff(c.retries); d != s { + t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) + } + } +} + +func TestAbortRetry(t *testing.T) { + cases := []struct { + timeOff time.Duration + expAbort bool + }{ + {time.Duration(1), false}, + {time.Duration(2), false}, + {time.Duration(10), false}, + {time.Duration(30), true}, + {time.Duration(40), true}, + } + + for _, c := range cases { + s := c.timeOff * time.Second + if a := abort(time.Now(), s); a != c.expAbort { + t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) + } + } +} + +func TestClientScheme(t *testing.T) { + cases := map[string]string{ + "tcp://127.0.0.1:8080": "http", + "unix:///usr/local/plugins/foo": "http", + "http://127.0.0.1:8080": "http", + "https://127.0.0.1:8080": "https", + } + + for addr, scheme := 
range cases { + u, err := url.Parse(addr) + if err != nil { + t.Fatal(err) + } + s := httpScheme(u) + + if s != scheme { + t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s) + } + } +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/discovery.go b/vendor/github.com/containers/storage/pkg/plugins/discovery.go new file mode 100644 index 000000000000..4cb5a1a3a7f6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/discovery.go @@ -0,0 +1,132 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/containers/storage/plugins" + specsPaths = []string{"/etc/containers/storage/plugins", "/usr/lib/containers/storage/plugins"} +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/discovery_test.go b/vendor/github.com/containers/storage/pkg/plugins/discovery_test.go new file mode 100644 index 000000000000..b8fa38c1d3b7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/discovery_test.go @@ -0,0 +1,119 @@ +package plugins + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Setup(t *testing.T) (string, func()) { + tmpdir, err := ioutil.TempDir("", "docker-test") + if err != nil { + t.Fatal(err) + } + backup := socketsPath + socketsPath = tmpdir + specsPaths = []string{tmpdir} + + return tmpdir, func() { + socketsPath = backup + os.RemoveAll(tmpdir) + } +} + +func TestFileSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []struct { + path string + name string + addr string + fail bool + }{ + // TODO Windows: Factor out the unix:// variants. 
+ {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/containers/storage/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/containers/storage/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(c.name) + if c.fail && err == nil { + continue + } + + if err != nil { + t.Fatal(err) + } + + if p.name != c.name { + t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.Name) + } + + if p.Addr != c.addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) + } + + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + } +} + +func TestFileJSONSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/containers-storage/plugin", + "TLSConfig": { + "CAFile": "/usr/shared/containers-storage/certs/example-ca.pem", + "CertFile": "/usr/shared/containers-storage/certs/example-cert.pem", + "KeyFile": "/usr/shared/containers-storage/certs/example-key.pem" + } +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if plugin.name != "example" { + t.Fatalf("Expected plugin `plugin-example`, got %s\n", plugin.Name) + } + + if plugin.Addr != "https://example.com/containers-storage/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/containers-storage/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig.CAFile != "/usr/shared/containers-storage/certs/example-ca.pem" { + t.Fatalf("Expected plugin CA `/usr/shared/containers-storage/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) + } + + if plugin.TLSConfig.CertFile != "/usr/shared/containers-storage/certs/example-cert.pem" { + t.Fatalf("Expected plugin Certificate `/usr/shared/containers-storage/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) + } + + if plugin.TLSConfig.KeyFile != "/usr/shared/containers-storage/certs/example-key.pem" { + t.Fatalf("Expected plugin Key `/usr/shared/containers-storage/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) + } +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/discovery_unix_test.go b/vendor/github.com/containers/storage/pkg/plugins/discovery_unix_test.go new file mode 100644 index 000000000000..53e02d285893 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/discovery_unix_test.go @@ -0,0 +1,61 @@ +// +build !windows + +package plugins + +import ( + "fmt" + "net" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestLocalSocket(t *testing.T) { + // TODO Windows: Enable a similar version for Windows named pipes + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []string{ + filepath.Join(tmpdir, "echo.sock"), + filepath.Join(tmpdir, "echo", "echo.sock"), + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c), 0755); 
err != nil { + t.Fatal(err) + } + + l, err := net.Listen("unix", c) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + + pp, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(p, pp) { + t.Fatalf("Expected %v, was %v\n", p, pp) + } + + if p.name != "echo" { + t.Fatalf("Expected plugin `echo`, got %s\n", p.Name) + } + + addr := fmt.Sprintf("unix://%s", c) + if p.Addr != addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) + } + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + l.Close() + } +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/errors.go b/vendor/github.com/containers/storage/pkg/plugins/errors.go new file mode 100644 index 000000000000..7988471026d4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/fixtures/foo.go new file mode 100644 index 000000000000..642cefe8f695 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/fixtures/foo.go @@ -0,0 +1,89 @@ +package foo + +import ( + "fmt" + + aliasedio "io" + + "github.com/containers/storage/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" +) + +var ( + errFakeImport = fmt.Errorf("just to import fmt for imports tests") +) + +type wobble struct { + Some string + Val string + Inception *wobble +} + +// Fooer is an empty interface used for tests. +type Fooer interface{} + +// Fooer2 is an interface used for tests. +type Fooer2 interface { + Foo() +} + +// Fooer3 is an interface used for tests. +type Fooer3 interface { + Foo() + Bar(a string) + Baz(a string) (err error) + Qux(a, b string) (val string, err error) + Wobble() (w *wobble) + Wiggle() (w wobble) + WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) +} + +// Fooer4 is an interface used for tests. +type Fooer4 interface { + Foo() error +} + +// Bar is an interface used for tests. +type Bar interface { + Boo(a string, b string) (s string, err error) +} + +// Fooer5 is an interface used for tests. +type Fooer5 interface { + Foo() + Bar +} + +// Fooer6 is an interface used for tests. +type Fooer6 interface { + Foo(a otherfixture.Spaceship) +} + +// Fooer7 is an interface used for tests. +type Fooer7 interface { + Foo(a *otherfixture.Spaceship) +} + +// Fooer8 is an interface used for tests. 
+type Fooer8 interface { + Foo(a map[string]otherfixture.Spaceship) +} + +// Fooer9 is an interface used for tests. +type Fooer9 interface { + Foo(a map[string]*otherfixture.Spaceship) +} + +// Fooer10 is an interface used for tests. +type Fooer10 interface { + Foo(a []otherfixture.Spaceship) +} + +// Fooer11 is an interface used for tests. +type Fooer11 interface { + Foo(a []*otherfixture.Spaceship) +} + +// Fooer12 is an interface used for tests. +type Fooer12 interface { + Foo(a aliasedio.Reader) +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go new file mode 100644 index 000000000000..1937d1786ced --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go @@ -0,0 +1,4 @@ +package otherfixture + +// Spaceship is a fixture for tests +type Spaceship struct{} diff --git a/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/main.go b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/main.go new file mode 100644 index 000000000000..e77a7d45ff7c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "unicode" + "unicode/utf8" +) + +type stringSet struct { + values map[string]struct{} +} + +func (s stringSet) String() string { + return "" +} + +func (s stringSet) Set(value string) error { + s.values[value] = struct{}{} + return nil +} +func (s stringSet) GetValues() map[string]struct{} { + return s.values +} + +var ( + typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") + rpcName = flag.String("name", *typeName, "RPC name, set if different from type") + inputFile = flag.String("i", "", "input file path") + outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") + + skipFuncs map[string]struct{} + flSkipFuncs = stringSet{make(map[string]struct{})} + + flBuildTags = stringSet{make(map[string]struct{})} +) + +func errorOut(msg string, err error) { + if err == nil { + return + } + fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) + os.Exit(1) +} + +func checkFlags() error { + if *outputFile == "" { + return fmt.Errorf("missing required flag `-o`") + } + if *inputFile == "" { + return fmt.Errorf("missing required flag `-i`") + } + return nil +} + +func main() { + flag.Var(flSkipFuncs, "skip", "skip parsing for function") + flag.Var(flBuildTags, "tag", "build tags to add to generated files") + flag.Parse() + skipFuncs = flSkipFuncs.GetValues() + + errorOut("error", checkFlags()) + + pkg, err := Parse(*inputFile, *typeName) + errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) + + var analysis = struct { + InterfaceType string + RPCName string + BuildTags map[string]struct{} + *ParsedPkg + }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} + var buf bytes.Buffer + + errorOut("parser error", generatedTempl.Execute(&buf, analysis)) + src, err := format.Source(buf.Bytes()) + errorOut("error formatting generated source:\n"+buf.String(), err) + errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644)) +} + +func toLower(s string) string { + if s == "" { + return "" + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToLower(r)) + s[n:] +} diff --git 
a/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/parser.go b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/parser.go new file mode 100644 index 000000000000..6c547e18cf9f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/parser.go @@ -0,0 +1,263 @@ +package main + +import ( + "errors" + "fmt" + "go/ast" + "go/parser" + "go/token" + "path" + "reflect" + "strings" +) + +var errBadReturn = errors.New("found return arg with no name: all args must be named") + +type errUnexpectedType struct { + expected string + actual interface{} +} + +func (e errUnexpectedType) Error() string { + return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) +} + +// ParsedPkg holds information about a package that has been parsed, +// its name and the list of functions. +type ParsedPkg struct { + Name string + Functions []function + Imports []importSpec +} + +type function struct { + Name string + Args []arg + Returns []arg + Doc string +} + +type arg struct { + Name string + ArgType string + PackageSelector string +} + +func (a *arg) String() string { + return a.Name + " " + a.ArgType +} + +type importSpec struct { + Name string + Path string +} + +func (s *importSpec) String() string { + var ss string + if len(s.Name) != 0 { + ss += s.Name + } + ss += s.Path + return ss +} + +// Parse parses the given file for an interface definition with the given name. +func Parse(filePath string, objName string) (*ParsedPkg, error) { + fs := token.NewFileSet() + pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) + if err != nil { + return nil, err + } + p := &ParsedPkg{} + p.Name = pkg.Name.Name + obj, exists := pkg.Scope.Objects[objName] + if !exists { + return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) + } + if obj.Kind != ast.Typ { + return nil, fmt.Errorf("exected type, got %s", obj.Kind) + } + spec, ok := obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} + } + + p.Functions, err = parseInterface(iface) + if err != nil { + return nil, err + } + + // figure out what imports will be needed + imports := make(map[string]importSpec) + for _, f := range p.Functions { + args := append(f.Args, f.Returns...) 
+ for _, arg := range args { + if len(arg.PackageSelector) == 0 { + continue + } + + for _, i := range pkg.Imports { + if i.Name != nil { + if i.Name.Name != arg.PackageSelector { + continue + } + imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value} + break + } + + _, name := path.Split(i.Path.Value) + splitName := strings.Split(name, "-") + if len(splitName) > 1 { + name = splitName[len(splitName)-1] + } + // import paths have quotes already added in, so need to remove them for name comparison + name = strings.TrimPrefix(name, `"`) + name = strings.TrimSuffix(name, `"`) + if name == arg.PackageSelector { + imports[i.Path.Value] = importSpec{Path: i.Path.Value} + break + } + } + } + } + + for _, spec := range imports { + p.Imports = append(p.Imports, spec) + } + + return p, nil +} + +func parseInterface(iface *ast.InterfaceType) ([]function, error) { + var functions []function + for _, field := range iface.Methods.List { + switch f := field.Type.(type) { + case *ast.FuncType: + method, err := parseFunc(field) + if err != nil { + return nil, err + } + if method == nil { + continue + } + functions = append(functions, *method) + case *ast.Ident: + spec, ok := f.Obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} + } + funcs, err := parseInterface(iface) + if err != nil { + fmt.Println(err) + continue + } + functions = append(functions, funcs...) + default: + return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} + } + } + return functions, nil +} + +func parseFunc(field *ast.Field) (*function, error) { + f := field.Type.(*ast.FuncType) + method := &function{Name: field.Names[0].Name} + if _, exists := skipFuncs[method.Name]; exists { + fmt.Println("skipping:", method.Name) + return nil, nil + } + if f.Params != nil { + args, err := parseArgs(f.Params.List) + if err != nil { + return nil, err + } + method.Args = args + } + if f.Results != nil { + returns, err := parseArgs(f.Results.List) + if err != nil { + return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) + } + method.Returns = returns + } + return method, nil +} + +func parseArgs(fields []*ast.Field) ([]arg, error) { + var args []arg + for _, f := range fields { + if len(f.Names) == 0 { + return nil, errBadReturn + } + for _, name := range f.Names { + p, err := parseExpr(f.Type) + if err != nil { + return nil, err + } + args = append(args, arg{name.Name, p.value, p.pkg}) + } + } + return args, nil +} + +type parsedExpr struct { + value string + pkg string +} + +func parseExpr(e ast.Expr) (parsedExpr, error) { + var parsed parsedExpr + switch i := e.(type) { + case *ast.Ident: + parsed.value += i.Name + case *ast.StarExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.value += "*" + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.SelectorExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.pkg = p.value + parsed.value += p.value + "." 
+ parsed.value += i.Sel.Name + case *ast.MapType: + parsed.value += "map[" + p, err := parseExpr(i.Key) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.value += "]" + p, err = parseExpr(i.Value) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.ArrayType: + parsed.value += "[]" + p, err := parseExpr(i.Elt) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + default: + return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} + } + return parsed, nil +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/parser_test.go b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/parser_test.go new file mode 100644 index 000000000000..a1b1ac9567fd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/parser_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const testFixture = "fixtures/foo.go" + +func TestParseEmptyInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 0, len(pkg.Functions)) +} + +func TestParseNonInterfaceType(t *testing.T) { + _, err := Parse(testFixture, "wobble") + if _, ok := err.(errUnexpectedType); !ok { + t.Fatal("expected type error when parsing non-interface type") + } +} + +func TestParseWithOneFunction(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer2") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 1, len(pkg.Functions)) + assertName(t, "Foo", pkg.Functions[0].Name) + assertNum(t, 0, len(pkg.Functions[0].Args)) + assertNum(t, 0, len(pkg.Functions[0].Returns)) +} + +func TestParseWithMultipleFuncs(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer3") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 7, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Bar", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + f = pkg.Functions[2] + assertName(t, "Baz", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[3] + assertName(t, "Qux", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", f.Args[0].Name) + assertName(t, "string", f.Args[0].ArgType) + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "val", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[4] + assertName(t, "Wobble", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "*wobble", arg.ArgType) + + f = pkg.Functions[5] + assertName(t, "Wiggle", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + 
assertName(t, "wobble", arg.ArgType) + + f = pkg.Functions[6] + assertName(t, "WiggleWobble", f.Name) + assertNum(t, 6, len(f.Args)) + assertNum(t, 6, len(f.Returns)) + expectedArgs := [][]string{ + {"a", "[]*wobble"}, + {"b", "[]wobble"}, + {"c", "map[string]*wobble"}, + {"d", "map[*wobble]wobble"}, + {"e", "map[string][]wobble"}, + {"f", "[]*otherfixture.Spaceship"}, + } + for i, arg := range f.Args { + assertName(t, expectedArgs[i][0], arg.Name) + assertName(t, expectedArgs[i][1], arg.ArgType) + } + expectedReturns := [][]string{ + {"g", "map[*wobble]wobble"}, + {"h", "[][]*wobble"}, + {"i", "otherfixture.Spaceship"}, + {"j", "*otherfixture.Spaceship"}, + {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"}, + {"l", "[]otherfixture.Spaceship"}, + } + for i, ret := range f.Returns { + assertName(t, expectedReturns[i][0], ret.Name) + assertName(t, expectedReturns[i][1], ret.ArgType) + } +} + +func TestParseWithUnamedReturn(t *testing.T) { + _, err := Parse(testFixture, "Fooer4") + if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { + t.Fatalf("expected ErrBadReturn, got %v", err) + } +} + +func TestEmbeddedInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer5") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 2, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Boo", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[0] + assertName(t, "s", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) +} + +func TestParsedImports(t *testing.T) { + cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"} + for _, testCase := range cases { + pkg, err := Parse(testFixture, testCase) + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + importPath := strings.Split(pkg.Imports[0].Path, "/") + assertName(t, "otherfixture\"", importPath[len(importPath)-1]) + assertName(t, "", pkg.Imports[0].Name) + } +} + +func TestAliasedImports(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer12") + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + assertName(t, "aliasedio", pkg.Imports[0].Name) +} + +func assertName(t *testing.T, expected, actual string) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) + } +} + +func assertNum(t *testing.T, expected, actual int) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) + } +} + +func fatalOut(t *testing.T, msg string) { + _, file, ln, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/template.go b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/template.go new file mode 100644 index 000000000000..50ed9293c116 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/pluginrpc-gen/template.go @@ -0,0 +1,118 @@ +package main + +import ( + "strings" + "text/template" +) + +func printArgs(args []arg) string { + var argStr []string + for _, arg := range 
args { + argStr = append(argStr, arg.String()) + } + return strings.Join(argStr, ", ") +} + +func buildImports(specs []importSpec) string { + if len(specs) == 0 { + return `import "errors"` + } + imports := "import(\n" + imports += "\t\"errors\"\n" + for _, i := range specs { + imports += "\t" + i.String() + "\n" + } + imports += ")" + return imports +} + +func marshalType(t string) string { + switch t { + case "error": + // convert error types to plain strings to ensure the values are encoded/decoded properly + return "string" + default: + return t + } +} + +func isErr(t string) bool { + switch t { + case "error": + return true + default: + return false + } +} + +// Need to use this helper due to issues with go-vet +func buildTag(s string) string { + return "+build " + s +} + +var templFuncs = template.FuncMap{ + "printArgs": printArgs, + "marshalType": marshalType, + "isErr": isErr, + "lower": strings.ToLower, + "title": title, + "tag": buildTag, + "imports": buildImports, +} + +func title(s string) string { + if strings.ToLower(s) == "id" { + return "ID" + } + return strings.Title(s) +} + +var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` +// generated code - DO NOT EDIT +{{ range $k, $v := .BuildTags }} + // {{ tag $k }} {{ end }} + +package {{ .Name }} + +{{ imports .Imports }} + +type client interface{ + Call(string, interface{}, interface{}) error +} + +type {{ .InterfaceType }}Proxy struct { + client +} + +{{ range .Functions }} + type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ + {{ range .Args }} + {{ title .Name }} {{ .ArgType }} {{ end }} + } + + type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ + {{ range .Returns }} + {{ title .Name }} {{ marshalType .ArgType }} {{ end }} + } + + func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { + var( + req {{ $.InterfaceType }}Proxy{{ .Name }}Request + ret {{ $.InterfaceType }}Proxy{{ .Name }}Response + ) + {{ range .Args }} + req.{{ title .Name }} = {{ lower .Name }} {{ end }} + if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { + return + } + {{ range $r := .Returns }} + {{ if isErr .ArgType }} + if ret.{{ title .Name }} != "" { + {{ lower .Name }} = errors.New(ret.{{ title .Name }}) + } {{ end }} + {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} + + return + } +{{ end }} +`)) diff --git a/vendor/github.com/containers/storage/pkg/plugins/plugins.go b/vendor/github.com/containers/storage/pkg/plugins/plugins.go new file mode 100644 index 000000000000..e197c3fdcfc6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/plugins.go @@ -0,0 +1,271 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Storage discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/containers/storage/plugins, whereas spec files can be +// located either under /etc/containers/storage/plugins or +// /usr/lib/containers/storage/plugins. This is handled by the Registry +// interface, which lets you list all plugins or get a plugin by its name if it +// exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. 
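generatedTempl is easier to read from its output than from its source, so before the plugins package comment continues below, here is a hand-written approximation of what the template would emit for a one-method interface. The interface, RPC name, and generated identifiers are illustrative; the authoritative output is whatever the generator itself produces:

```go
// Approximate shape of the generated proxy for
//   type Fooer interface { Bar(a string) (err error) }
// with InterfaceType=Fooer and RPCName=FooerPlugin (illustrative names).
package foo

import "errors"

type client interface {
	Call(string, interface{}, interface{}) error
}

type FooerProxy struct {
	client
}

type FooerProxyBarRequest struct {
	A string
}

type FooerProxyBarResponse struct {
	Err string // error marshaled as a string, per marshalType
}

func (pp *FooerProxy) Bar(a string) (err error) {
	var (
		req FooerProxyBarRequest
		ret FooerProxyBarResponse
	)
	req.A = a
	if err = pp.Call("FooerPlugin.Bar", req, &ret); err != nil {
		return
	}
	if ret.Err != "" {
		err = errors.New(ret.Err)
	}
	return
}
```

This also shows why parseArgs insists on named returns: the generated method body assigns to the return values by name.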
+// A handshake is sent at /Plugin.Activate, and plugins are expected to return
+// a Manifest with a list of subsystems which this plugin implements. As of
+// this writing, the known subsystem is "GraphDriver".
+//
+// In order to use a plugin, you can use ``Get`` with the name of the
+// plugin and the subsystem it implements.
+//
+// plugin, err := plugins.Get("example", "VolumeDriver")
+// if err != nil {
+// 	return fmt.Errorf("Error looking up volume plugin example: %v", err)
+// }
+package plugins
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// ErrNotImplements is returned if the plugin does not implement the requested driver.
+	ErrNotImplements = errors.New("Plugin does not implement the requested driver")
+)
+
+type plugins struct {
+	sync.Mutex
+	plugins map[string]*Plugin
+}
+
+var (
+	storage          = plugins{plugins: make(map[string]*Plugin)}
+	extpointHandlers = make(map[string]func(string, *Client))
+)
+
+// Manifest lists what a plugin implements.
+type Manifest struct {
+	// List of subsystems the plugin implements.
+	Implements []string
+}
+
+// Plugin is the definition of a storage plugin.
+type Plugin struct {
+	// Name of the plugin
+	name string
+	// Address of the plugin
+	Addr string
+	// TLS configuration of the plugin
+	TLSConfig *tlsconfig.Options
+	// Client attached to the plugin
+	client *Client
+	// Manifest of the plugin (see above)
+	Manifest *Manifest `json:"-"`
+
+	// error produced by activation
+	activateErr error
+	// specifies if the activation sequence is completed (not whether it was successful)
+	activated bool
+	// wait for activation to finish
+	activateWait *sync.Cond
+}
+
+// Name returns the name of the plugin.
+func (p *Plugin) Name() string {
+	return p.name
+}
+
+// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
+func (p *Plugin) Client() *Client {
+	return p.client
+}
+
+// NewLocalPlugin creates a new local plugin.
+func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + if p.activated { + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + p.activated = true + + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + + for _, iface := range m.Implements { + handler, handled := extpointHandlers[iface] + if !handled { + continue + } + handler(p.name, p.client) + } + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if err := p.waitActive(); err != nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. 
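The activation flow above (activate, activateWithLock, waitActive) has a plugin process on the other end. A sketch of the minimal plugin side of the handshake, serving the Manifest that activateWithLock decodes; the socket path and subsystem name are illustrative:

```go
package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		// Content-Type matches transport.VersionMimetype; the body is the
		// Manifest that the daemon side decodes during activation.
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1.2+json")
		w.Write([]byte(`{"Implements": ["GraphDriver"]}`))
	})
	// Illustrative socket path under the plugin directory named in the package comment.
	l, err := net.Listen("unix", "/run/containers/storage/plugins/example.sock")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(l, mux))
}
```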
+func Handle(iface string, fn func(string, *Client)) { + extpointHandlers[iface] = fn +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + if pl, ok := storage.plugins[name]; ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/transport/http.go b/vendor/github.com/containers/storage/pkg/plugins/transport/http.go new file mode 100644 index 000000000000..5be146af6574 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/containers/storage/pkg/plugins/transport/transport.go b/vendor/github.com/containers/storage/pkg/plugins/transport/transport.go new file mode 100644 index 000000000000..d7f1e2100c43 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. 
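httpTransport above deliberately leaves the dialing policy to the injected http.RoundTripper and only rewrites the URL scheme and host. A sketch of the kind of RoundTripper one might pair with NewHTTPTransport for a Unix-socket plugin; nothing here is part of the vendored package:

```go
package pluginclient

import (
	"context"
	"net"
	"net/http"
	"time"
)

// NewUnixRoundTripper returns a RoundTripper that ignores the request's
// network address and always dials the given socket; the placeholder
// scheme/addr that httpTransport.NewRequest fills in are then only cosmetic.
func NewUnixRoundTripper(socket string) http.RoundTripper {
	return &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			d := net.Dialer{Timeout: 10 * time.Second}
			return d.DialContext(ctx, "unix", socket)
		},
	}
}

// Usage, assuming the vendored package were importable:
//   t := transport.NewHTTPTransport(NewUnixRoundTripper("/run/plugin.sock"), "http", "plugin")
```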
+func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/containers/storage/pkg/promise/promise_test.go b/vendor/github.com/containers/storage/pkg/promise/promise_test.go deleted file mode 100644 index 287213b504e3..000000000000 --- a/vendor/github.com/containers/storage/pkg/promise/promise_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package promise - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestGo(t *testing.T) { - errCh := Go(functionWithError) - er := <-errCh - require.EqualValues(t, "Error Occurred", er.Error()) - - noErrCh := Go(functionWithNoError) - er = <-noErrCh - require.Nil(t, er) -} - -func functionWithError() (err error) { - return errors.New("Error Occurred") -} -func functionWithNoError() (err error) { - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/random/random.go b/vendor/github.com/containers/storage/pkg/random/random.go new file mode 100644 index 000000000000..70de4d1304c5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand is a global *rand.Rand instance, which initialized with NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns math/rand.Source safe for concurrent use and initialized +// with current unix-nano timestamp +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. 
+ seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/github.com/containers/storage/pkg/random/random_test.go b/vendor/github.com/containers/storage/pkg/random/random_test.go new file mode 100644 index 000000000000..cf405f78cbde --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/random/random_test.go @@ -0,0 +1,22 @@ +package random + +import ( + "math/rand" + "sync" + "testing" +) + +// for go test -v -race +func TestConcurrency(t *testing.T) { + rnd := rand.New(NewSource()) + var wg sync.WaitGroup + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + rnd.Int63() + wg.Done() + }() + } + wg.Wait() +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md deleted file mode 100644 index 6658f69b69d7..000000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# reexec - -The `reexec` package facilitates the busybox style reexec of the docker binary that we require because -of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of -the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 05319eacc93d..3c3a73a9d57b 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -5,8 +5,6 @@ package reexec import ( "os/exec" "syscall" - - "golang.org/x/sys/unix" ) // Self returns the path to the current process's binary. @@ -15,7 +13,7 @@ func Self() string { return "/proc/self/exe" } -// Command returns *exec.Cmd which has Path as current binary. Also it setting +// Command returns *exec.Cmd which have Path as current binary. Also it setting // SysProcAttr.Pdeathsig to SIGTERM. // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). @@ -24,7 +22,7 @@ func Command(args ...string) *exec.Cmd { Path: Self(), Args: args, SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: unix.SIGTERM, + Pdeathsig: syscall.SIGTERM, }, } } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 778a720e3b91..b70edcb316bc 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,4 +1,4 @@ -// +build freebsd solaris darwin +// +build freebsd solaris package reexec @@ -12,7 +12,7 @@ func Self() string { return naiveSelf() } -// Command returns *exec.Cmd which has Path as current binary. +// Command returns *exec.Cmd which have Path as current binary. // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". 
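The seeding strategy in NewSource above is worth isolating: prefer a crypto-quality seed, fall back to the clock only on error, and wrap the source in a mutex because a bare rand.Source is not safe for concurrent use. A self-contained sketch of the same idea (the lock is omitted here, so the returned Rand is for single-goroutine use):

```go
package main

import (
	cryptorand "crypto/rand"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"time"
)

// seededRand builds a math/rand generator seeded the same way NewSource is:
// from crypto/rand when possible, from the clock otherwise.
func seededRand() *rand.Rand {
	seed := time.Now().UnixNano() // worst-case fallback
	if n, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err == nil {
		seed = n.Int64()
	}
	return rand.New(rand.NewSource(seed))
}

func main() {
	fmt.Println(seededRand().Int63())
}
```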
func Command(args ...string) *exec.Cmd { diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 76edd824273e..9aed004e864b 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd,!solaris,!darwin +// +build !linux,!windows,!freebsd,!solaris package reexec @@ -6,7 +6,7 @@ import ( "os/exec" ) -// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. +// Command is unsupported on operating systems apart from Linux and Windows. func Command(args ...string) *exec.Cmd { return nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index ca871c4227ed..8d65e0ae1adc 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -12,7 +12,7 @@ func Self() string { return naiveSelf() } -// Command returns *exec.Cmd which has Path as current binary. +// Command returns *exec.Cmd which have Path as current binary. // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec_test.go b/vendor/github.com/containers/storage/pkg/reexec/reexec_test.go deleted file mode 100644 index 39e87a4a27d5..000000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package reexec - -import ( - "os" - "os/exec" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func init() { - Register("reexec", func() { - panic("Return Error") - }) - Init() -} - -func TestRegister(t *testing.T) { - defer func() { - if r := recover(); r != nil { - require.Equal(t, `reexec func already registered under name "reexec"`, r) - } - }() - Register("reexec", func() {}) -} - -func TestCommand(t *testing.T) { - cmd := Command("reexec") - w, err := cmd.StdinPipe() - require.NoError(t, err, "Error on pipe creation: %v", err) - defer w.Close() - - err = cmd.Start() - require.NoError(t, err, "Error on re-exec cmd: %v", err) - err = cmd.Wait() - require.EqualError(t, err, "exit status 2") -} - -func TestNaiveSelf(t *testing.T) { - if os.Getenv("TEST_CHECK") == "1" { - os.Exit(2) - } - cmd := exec.Command(naiveSelf(), "-test.run=TestNaiveSelf") - cmd.Env = append(os.Environ(), "TEST_CHECK=1") - err := cmd.Start() - require.NoError(t, err, "Unable to start command") - err = cmd.Wait() - require.EqualError(t, err, "exit status 2") - - os.Args[0] = "mkdir" - assert.NotEqual(t, naiveSelf(), os.Args[0]) -} diff --git a/vendor/github.com/containers/storage/pkg/stringid/README.md b/vendor/github.com/containers/storage/pkg/stringid/README.md deleted file mode 100644 index 37a5098fd988..000000000000 --- a/vendor/github.com/containers/storage/pkg/stringid/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go index a0c7c42a0508..74dfaaaa7605 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ 
b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -2,25 +2,19 @@ package stringid import ( - cryptorand "crypto/rand" + "crypto/rand" "encoding/hex" - "fmt" "io" - "math" - "math/big" - "math/rand" "regexp" "strconv" "strings" - "time" + + "github.com/containers/storage/pkg/random" ) const shortLen = 12 -var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) -) +var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") // IsShortID determines if an arbitrary string *looks like* a short ID. func IsShortID(id string) bool { @@ -35,14 +29,19 @@ func TruncateID(id string) string { if i := strings.IndexRune(id, ':'); i >= 0 { id = id[i+1:] } - if len(id) > shortLen { - id = id[:shortLen] + trimTo := shortLen + if len(id) < shortLen { + trimTo = len(id) } - return id + return id[:trimTo] } -func generateID(r io.Reader) string { +func generateID(crypto bool) string { b := make([]byte, 32) + r := random.Reader + if crypto { + r = rand.Reader + } for { if _, err := io.ReadFull(r, b); err != nil { panic(err) // This shouldn't happen @@ -60,40 +59,13 @@ func generateID(r io.Reader) string { // GenerateRandomID returns a unique id. func GenerateRandomID() string { - return generateID(cryptorand.Reader) + return generateID(true) + } // GenerateNonCryptoID generates unique id without using cryptographically // secure sources of random. // It helps you to save entropy. func GenerateNonCryptoID() string { - return generateID(readerFunc(rand.Read)) -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} - -func init() { - // safely set the seed globally so we generate random ids. Tries to use a - // crypto seed before falling back to time. - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. 
- seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - - rand.Seed(seed) -} - -type readerFunc func(p []byte) (int, error) - -func (fn readerFunc) Read(p []byte) (int, error) { - return fn(p) + return generateID(false) } diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid_test.go b/vendor/github.com/containers/storage/pkg/stringid/stringid_test.go index 8ff6b4383d4d..bcb136549552 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid_test.go +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid_test.go @@ -13,26 +13,10 @@ func TestGenerateRandomID(t *testing.T) { } } -func TestGenerateNonCryptoID(t *testing.T) { - id := GenerateNonCryptoID() - - if len(id) != 64 { - t.Fatalf("Id returned is incorrect: %s", id) - } -} - func TestShortenId(t *testing.T) { - id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2" - truncID := TruncateID(id) - if truncID != "90435eec5c4e" { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestShortenSha256Id(t *testing.T) { - id := "sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba" + id := GenerateRandomID() truncID := TruncateID(id) - if truncID != "4e38e38c8ce0" { + if len(truncID) != 12 { t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) } } diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md deleted file mode 100644 index b3e454573c32..000000000000 --- a/vendor/github.com/containers/storage/pkg/stringutils/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with strings diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go index 8c4c39875ebb..078ceaf2f7ab 100644 --- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -5,6 +5,8 @@ import ( "bytes" "math/rand" "strings" + + "github.com/containers/storage/pkg/random" ) // GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. @@ -13,7 +15,7 @@ func GenerateRandomAlphaOnlyString(n int) string { letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]byte, n) for i := range b { - b[i] = letters[rand.Intn(len(letters))] + b[i] = letters[random.Rand.Intn(len(letters))] } return string(b) } @@ -30,26 +32,12 @@ func GenerateRandomASCIIString(n int) string { return string(res) } -// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). -// For maxlen of 3 and lower, no ellipsis is appended. -func Ellipsis(s string, maxlen int) string { - r := []rune(s) - if len(r) <= maxlen { - return s - } - if maxlen <= 3 { - return string(r[:maxlen]) - } - return string(r[:maxlen-3]) + "..." -} - // Truncate truncates a string to maxlen. func Truncate(s string, maxlen int) string { - r := []rune(s) - if len(r) <= maxlen { + if len(s) <= maxlen { return s } - return string(r[:maxlen]) + return s[:maxlen] } // InSlice tests whether a string is contained in a slice of strings or not. 
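The reworked TruncateID above has two observable behaviors, which the rewritten TestShortenId relies on: any algorithm prefix such as "sha256:" is dropped, and the remainder is clipped to at most twelve characters, with shorter IDs passing through unchanged. An equivalent standalone sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// truncateID mirrors the vendored TruncateID: strip an "algo:" prefix,
// then keep at most 12 characters.
func truncateID(id string) string {
	if i := strings.IndexRune(id, ':'); i >= 0 {
		id = id[i+1:]
	}
	if len(id) > 12 {
		id = id[:12]
	}
	return id
}

func main() {
	fmt.Println(truncateID("sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba")) // 4e38e38c8ce0
	fmt.Println(truncateID("abc"))                                                                     // abc
}
```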
@@ -75,7 +63,7 @@ func quote(word string, buf *bytes.Buffer) { for i := 0; i < len(word); i++ { b := word[i] if b == '\'' { - // Replace literal ' with a close ', a \', and an open ' + // Replace literal ' with a close ', a \', and a open ' buf.WriteString("'\\''") } else { buf.WriteByte(b) diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils_test.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils_test.go index 8af2bdcc0bc2..fec59450bce4 100644 --- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils_test.go +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils_test.go @@ -57,40 +57,24 @@ func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { } } -func TestEllipsis(t *testing.T) { - str := "t🐳ststring" - newstr := Ellipsis(str, 3) - if newstr != "t🐳s" { - t.Fatalf("Expected t🐳s, got %s", newstr) - } - newstr = Ellipsis(str, 8) - if newstr != "t🐳sts..." { - t.Fatalf("Expected tests..., got %s", newstr) - } - newstr = Ellipsis(str, 20) - if newstr != "t🐳ststring" { - t.Fatalf("Expected t🐳ststring, got %s", newstr) - } -} - func TestTruncate(t *testing.T) { - str := "t🐳ststring" + str := "teststring" newstr := Truncate(str, 4) - if newstr != "t🐳st" { - t.Fatalf("Expected t🐳st, got %s", newstr) + if newstr != "test" { + t.Fatalf("Expected test, got %s", newstr) } newstr = Truncate(str, 20) - if newstr != "t🐳ststring" { - t.Fatalf("Expected t🐳ststring, got %s", newstr) + if newstr != "teststring" { + t.Fatalf("Expected teststring, got %s", newstr) } } func TestInSlice(t *testing.T) { - slice := []string{"t🐳st", "in", "slice"} + slice := []string{"test", "in", "slice"} - test := InSlice(slice, "t🐳st") + test := InSlice(slice, "test") if !test { - t.Fatalf("Expected string t🐳st to be in slice") + t.Fatalf("Expected string test to be in slice") } test = InSlice(slice, "SLICE") if !test { diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go index 056d19954d63..7637f12e1a7d 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -2,9 +2,26 @@ package system import ( "os" + "syscall" "time" + "unsafe" ) +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_test.go b/vendor/github.com/containers/storage/pkg/system/chtimes_test.go index fcd644f5c65d..5c87df32a278 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_test.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_test.go @@ -10,7 +10,7 @@ import ( // prepareTempFile creates a temporary file in a temporary directory. 
func prepareTempFile(t *testing.T) (string, string) { - dir, err := ioutil.TempDir("", "storage-system-test") + dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix_test.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix_test.go index 6ec9a7173c0b..0aafe1d84502 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix_test.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix_test.go @@ -9,7 +9,7 @@ import ( "time" ) -// TestChtimesLinux tests Chtimes access time on a tempfile on Linux +// TestChtimes tests Chtimes access time on a tempfile on Linux func TestChtimesLinux(t *testing.T) { file, dir := prepareTempFile(t) defer os.RemoveAll(dir) diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go index 45428c141ca9..29458684659b 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -3,26 +3,25 @@ package system import ( + "syscall" "time" - - "golang.org/x/sys/windows" ) //setCTime will set the create time on a file. On Windows, this requires //calling SetFileTime and explicitly including the create time. func setCTime(path string, ctime time.Time) error { - ctimespec := windows.NsecToTimespec(ctime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) + ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) + pathp, e := syscall.UTF16PtrFromString(path) if e != nil { return e } - h, e := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) if e != nil { return e } - defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) - return windows.SetFileTime(h, &c, nil, nil) + defer syscall.Close(h) + c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) + return syscall.SetFileTime(h, &c, nil, nil) } diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows_test.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows_test.go index 72d8a10619d7..be57558e1b3c 100644 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows_test.go +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows_test.go @@ -9,7 +9,7 @@ import ( "time" ) -// TestChtimesWindows tests Chtimes access time on a tempfile on Windows +// TestChtimes tests Chtimes access time on a tempfile on Windows func TestChtimesWindows(t *testing.T) { file, dir := prepareTempFile(t) defer os.RemoveAll(dir) diff --git a/vendor/github.com/containers/storage/pkg/system/events_windows.go b/vendor/github.com/containers/storage/pkg/system/events_windows.go new file mode 100644 index 000000000000..04e2de78714f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. 
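The init logic folded into chtimes.go above probes the platform once at startup: the byte width of syscall.Timespec's Nsec field decides whether os.Chtimes can carry 64-bit nanosecond timestamps or only a 32-bit epoch. A small Unix-only sketch of the same probe (syscall.Timespec is not defined on Windows):

```go
package main

import (
	"fmt"
	"syscall"
	"time"
	"unsafe"
)

func main() {
	var maxTime time.Time
	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
		// 64-bit timespec: nanoseconds since epoch, overflows in year 2262
		maxTime = time.Unix(0, 1<<63-1)
	} else {
		// 32-bit timespec: seconds since epoch, overflows in year 2038
		maxTime = time.Unix(1<<31-1, 0)
	}
	fmt.Println("max representable mtime:", maxTime.UTC())
}
```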
+ +import ( + "syscall" + "unsafe" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go deleted file mode 100644 index 60f0514b1ddd..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/exitcode.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. -func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. 
- // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return -} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go index 102565f7601a..c14feb849659 100644 --- a/vendor/github.com/containers/storage/pkg/system/filesys.go +++ b/vendor/github.com/containers/storage/pkg/system/filesys.go @@ -3,19 +3,13 @@ package system import ( - "io/ioutil" "os" "path/filepath" ) -// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return MkdirAll(path, perm, sddl) -} - // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode, sddl string) error { +func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } @@ -23,45 +17,3 @@ func MkdirAll(path string, perm os.FileMode, sddl string) error { func IsAbs(path string) bool { return filepath.IsAbs(path) } - -// The functions below here are wrappers for the equivalents in the os and ioutils packages. -// They are passthrough on Unix platforms, and only relevant on Windows. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// TempFileSequential creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. 
-func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return ioutil.TempFile(dir, prefix) -} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go index a61b53d0ba35..16823d5517ce 100644 --- a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go @@ -6,44 +6,17 @@ import ( "os" "path/filepath" "regexp" - "strconv" "strings" - "sync" "syscall" - "time" - "unsafe" - - winio "github.com/Microsoft/go-winio" - "golang.org/x/sys/windows" -) - -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System - SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// with an appropriate SDDL defined ACL. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return mkdirall(path, true, sddl) -} - // MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode, sddl string) error { - return mkdirall(path, false, sddl) -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, applyACL bool, sddl string) error { +func MkdirAll(path string, perm os.FileMode) error { if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { return nil } - // The rest of this method is largely copied from os.MkdirAll and should be kept + // The rest of this method is copied from os.MkdirAll and should be kept // as-is to ensure compatibility. // Fast path: if we can tell whether path is a directory or file, stop with success or error. @@ -72,19 +45,14 @@ func mkdirall(path string, applyACL bool, sddl string) error { if j > 1 { // Create parent - err = mkdirall(path[0:j-1], false, sddl) + err = MkdirAll(path[0:j-1], perm) if err != nil { return err } } - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if applyACL { - err = mkdirWithACL(path, sddl) - } else { - err = os.Mkdir(path, 0) - } - + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) if err != nil { // Handle arguments like "foo/." by // double-checking that directory doesn't exist. @@ -97,35 +65,6 @@ func mkdirall(path string, applyACL bool, sddl string) error { return nil } -// mkdirWithACL creates a new directory. If there is an error, it will be of -// type *PathError. . -// -// This is a modified and combined version of os.Mkdir and windows.Mkdir -// in golang to cater for creating a directory am ACL permitting full -// access, with inheritance, to any subfolder/file for Built-in Administrators -// and Local System. 
-func mkdirWithACL(name string, sddl string) error { - sa := windows.SecurityAttributes{Length: 0} - sd, err := winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - - namep, err := windows.UTF16PtrFromString(name) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - - e := windows.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} - } - return nil -} - // IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, // golang filepath.IsAbs does not consider a path \windows\system32 as absolute // as it doesn't start with a drive-letter/colon combination. However, in @@ -141,158 +80,3 @@ func IsAbs(path string) bool { } return true } - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for TempFileSequential -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. 
It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go deleted file mode 100644 index 17935088dedf..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "time" - "unsafe" -) - -// Used by chtimes -var maxTime time.Time - -func init() { - // chtimes initialization - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go deleted file mode 100644 index 019c66441ce9..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/init_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import "os" - -// LCOWSupported determines if Linux Containers on Windows are supported. -// Note: This feature is in development (06/17) and enabled through an -// environment variable. At a future time, it will be enabled based -// on build number. @jhowardmsft -var lcowSupported = false - -func init() { - // LCOW initialization - if os.Getenv("LCOW_SUPPORTED") != "" { - lcowSupported = true - } - -} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go deleted file mode 100644 index cff33bb40856..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return false -} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go deleted file mode 100644 index e54d01e696b5..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. 
-func LCOWSupported() bool { - return lcowSupported -} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat.go similarity index 100% rename from vendor/github.com/containers/storage/pkg/system/lstat_unix.go rename to vendor/github.com/containers/storage/pkg/system/lstat.go diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go index e51df0dafeb1..49e87eb40bae 100644 --- a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go @@ -1,14 +1,25 @@ +// +build windows + package system -import "os" +import ( + "os" +) // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that is not strictly necessary when already in an OS specific module. func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { return nil, err } - return fromStatT(&fi) + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil } diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go index 925776e789bf..313c601b1255 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -7,7 +7,6 @@ import ( "unsafe" ) -// #cgo CFLAGS: -std=c99 // #cgo LDFLAGS: -lkstat // #include // #include @@ -90,7 +89,7 @@ func ReadMemInfo() (*MemInfo, error) { if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { - return nil, fmt.Errorf("error getting system memory info %v\n", err) + return nil, fmt.Errorf("Error getting system memory info %v\n", err) } meminfo := &MemInfo{} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go index 883944a4c536..d46642598cf6 100644 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go @@ -1,13 +1,12 @@ package system import ( + "syscall" "unsafe" - - "golang.org/x/sys/windows" ) var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") ) diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go index af79a6538333..73958182b4eb 100644 --- a/vendor/github.com/containers/storage/pkg/system/mknod.go +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -3,13 +3,13 @@ package system import ( - "golang.org/x/sys/unix" + "syscall" ) // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev.
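Callers pair Mknod with Mkdev, which packs major/minor numbers into the dev argument. A hedged, Linux-only sketch of that flow; the bit layout in mkdev mirrors the glibc-style encoding the vendored Mkdev uses, and the path and the 1:3 device number (conventionally /dev/null) are illustrative:

package main

import (
	"fmt"
	"syscall"
)

// mkdev packs major/minor numbers the way the vendored Mkdev helper does:
// the low 8 bits of the minor, then 12 bits of the major, then the top
// bits of the minor shifted high (for 16-bit backwards compatibility).
func mkdev(major, minor int64) int {
	return int(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func main() {
	dev := mkdev(1, 3) // 1:3 is the conventional device number for /dev/null
	// S_IFCHR marks a character device; 0600 are the permission bits.
	if err := syscall.Mknod("/tmp/null-clone", syscall.S_IFCHR|0600, dev); err != nil {
		fmt.Println("mknod failed (needs root or CAP_MKNOD):", err)
		return
	}
	fmt.Println("created /tmp/null-clone")
}

Running it requires root or CAP_MKNOD, so the error path is the expected outcome in most environments.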
func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) + return syscall.Mknod(path, mode, dev) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go deleted file mode 100644 index f634a6be673e..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/path.go +++ /dev/null @@ -1,21 +0,0 @@ -package system - -import "runtime" - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -func DefaultPathEnv(platform string) string { - if runtime.GOOS == "windows" { - if platform != runtime.GOOS && LCOWSupported() { - return defaultUnixPathEnv - } - // Deliberately empty on Windows containers on Windows as the default path will be set by - // the container. Docker has no context of what the default path should be. - return "" - } - return defaultUnixPathEnv - -} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go index f3762e69d36a..c607c4db09f2 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -2,6 +2,11 @@ package system +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. This is a no-op on Linux. func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go index aab891522db8..cbfe2c1576ce 100644 --- a/vendor/github.com/containers/storage/pkg/system/path_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -8,11 +8,15 @@ import ( "strings" ) +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. +const DefaultPathEnv = "" + // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. // If a drive letter is supplied, it must be the system drive. The drive letter // is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with +// need the path in this syntax so that it can ultimately be concatenated with // a Windows long-path which doesn't support drive-letters.
Examples: // C: --> Fail // C:\ --> \ diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go deleted file mode 100644 index 26c8b42c1769..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/process_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build linux freebsd solaris darwin - -package system - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - err := unix.Kill(pid, syscall.Signal(0)) - if err == nil || err == unix.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - unix.Kill(pid, unix.SIGKILL) -} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go deleted file mode 100644 index fc03c3e6b6db..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ /dev/null @@ -1,80 +0,0 @@ -package system - -import ( - "os" - "syscall" - "time" - - "github.com/containers/storage/pkg/mount" - "github.com/pkg/errors" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. -// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 5 - - // Attempt to unmount anything beneath this dir first - mount.RecursiveUnmount(dir) - - for { - err := os.RemoveAll(dir) - if err == nil { - return err - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. - // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. 
- if pe.Path == dir { - return nil - } - continue - } - - if pe.Err != syscall.EBUSY { - return err - } - - if mounted, _ := mount.Mounted(pe.Path); mounted { - if e := mount.Unmount(pe.Path); e != nil { - if mounted, _ := mount.Mounted(pe.Path); mounted { - return errors.Wrapf(e, "error while removing %s", dir) - } - } - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/rm_test.go b/vendor/github.com/containers/storage/pkg/system/rm_test.go deleted file mode 100644 index 98371d4a64c4..000000000000 --- a/vendor/github.com/containers/storage/pkg/system/rm_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/containers/storage/pkg/mount" -) - -func TestEnsureRemoveAllNotExist(t *testing.T) { - // should never return an error for a non-existent path - if err := EnsureRemoveAll("/non/existent/path"); err != nil { - t.Fatal(err) - } -} - -func TestEnsureRemoveAllWithDir(t *testing.T) { - dir, err := ioutil.TempDir("", "test-ensure-removeall-with-dir") - if err != nil { - t.Fatal(err) - } - if err := EnsureRemoveAll(dir); err != nil { - t.Fatal(err) - } -} - -func TestEnsureRemoveAllWithFile(t *testing.T) { - tmp, err := ioutil.TempFile("", "test-ensure-removeall-with-dir") - if err != nil { - t.Fatal(err) - } - tmp.Close() - if err := EnsureRemoveAll(tmp.Name()); err != nil { - t.Fatal(err) - } -} - -func TestEnsureRemoveAllWithMount(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("mount not supported on Windows") - } - - dir1, err := ioutil.TempDir("", "test-ensure-removeall-with-dir1") - if err != nil { - t.Fatal(err) - } - dir2, err := ioutil.TempDir("", "test-ensure-removeall-with-dir2") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir2) - - bindDir := filepath.Join(dir1, "bind") - if err := os.MkdirAll(bindDir, 0755); err != nil { - t.Fatal(err) - } - - if err := mount.Mount(dir2, bindDir, "none", "bind"); err != nil { - t.Fatal(err) - } - - done := make(chan struct{}) - go func() { - err = EnsureRemoveAll(dir1) - close(done) - }() - - select { - case <-done: - if err != nil { - t.Fatal(err) - } - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for EnsureRemoveAll to finish") - } - - if _, err := os.Stat(dir1); !os.IsNotExist(err) { - t.Fatalf("expected %q to not exist", dir1) - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat.go similarity index 74% rename from vendor/github.com/containers/storage/pkg/system/stat_unix.go rename to vendor/github.com/containers/storage/pkg/system/stat.go index 91c7d121cc7f..087034c5ec55 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/stat.go @@ -47,14 +47,7 @@ func (s StatT) Mtim() syscall.Timespec { return s.mtim } -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) +// GetLastModification returns file's last modification time. 
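The accessor that follows is a thin alias for Mtim(). As a hedged, Linux-only sketch of what a caller ultimately gets out of it, here is the equivalent sequence against the raw syscall package (the path is illustrative):

package main

import (
	"fmt"
	"log"
	"syscall"
	"time"
)

func main() {
	// Equivalent of system.Stat("/etc/hostname") followed by
	// GetLastModification(): stat the file and read st_mtim.
	var st syscall.Stat_t
	if err := syscall.Stat("/etc/hostname", &st); err != nil {
		log.Fatal(err)
	}
	mtime := time.Unix(int64(st.Mtim.Sec), int64(st.Mtim.Nsec))
	fmt.Println("last modified:", mtime)
}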
+func (s StatT) GetLastModification() syscall.Timespec { + return s.Mtim() } diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go index 715f05b9387f..d0fb6f15190a 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -1,6 +1,8 @@ package system -import "syscall" +import ( + "syscall" +) // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { @@ -11,3 +13,15 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil } + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go index 1939f95181a0..8b1eded1387a 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -1,6 +1,8 @@ package system -import "syscall" +import ( + "syscall" +) // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { @@ -12,8 +14,20 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mtim: s.Mtim}, nil } -// FromStatT converts a syscall.Stat_t type to a system.Stat_t type -// This is exposed on Linux as pkg/archive/changes uses it. +// FromStatT exists only on linux, and loads a system.StatT from a +// syscall.Stat_t. func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) } + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file.
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go index b607dea946f8..3c3b71fb2196 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -1,8 +1,10 @@ package system -import "syscall" +import ( + "syscall" +) -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +// fromStatT creates a system.StatT type from a syscall.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go index b607dea946f8..0216985a2525 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -1,8 +1,12 @@ +// +build solaris + package system -import "syscall" +import ( + "syscall" +) -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +// fromStatT creates a system.StatT type from a syscall.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), @@ -11,3 +15,20 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtim}, nil } + +// FromStatT loads a system.StatT from a syscall.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_unsupported.go similarity index 59% rename from vendor/github.com/containers/storage/pkg/system/stat_darwin.go rename to vendor/github.com/containers/storage/pkg/system/stat_unsupported.go index 715f05b9387f..f53e9de4d1a1 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_unsupported.go @@ -1,8 +1,12 @@ +// +build !linux,!windows,!freebsd,!solaris,!openbsd + package system -import "syscall" +import ( + "syscall" +) -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +// fromStatT creates a system.StatT type from a syscall.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index 6c63972682a4..39490c625c03 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -1,3 +1,5 @@ +// +build windows + package system import ( @@ -6,11 +8,18 @@ import ( ) // StatT type contains status of a file. It contains metadata -// like permission, size, etc about a file. +// like name, permission, size, etc about a file.
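With the revert, Windows drops fromStatT entirely and fills StatT field-by-field from os.FileInfo, as the Lstat hunk earlier showed. A standalone sketch of that copy, using a local struct so it runs on any platform (the field names mirror the vendored ones but are assumptions here):

package main

import (
	"fmt"
	"log"
	"os"
	"time"
)

// statT mirrors the reverted Windows StatT: plain copies of the
// os.FileInfo accessors rather than a syscall-level stat.
type statT struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
	isDir   bool
}

func fromFileInfo(fi os.FileInfo) *statT {
	return &statT{
		name:    fi.Name(),
		size:    fi.Size(),
		mode:    fi.Mode(),
		modTime: fi.ModTime(),
		isDir:   fi.IsDir(),
	}
}

func main() {
	fi, err := os.Lstat(".")
	if err != nil {
		log.Fatal(err)
	}
	s := fromFileInfo(fi)
	fmt.Printf("%s: %d bytes, dir=%v, mtime=%s\n", s.name, s.size, s.isDir, s.modTime)
}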
type StatT struct { - mode os.FileMode - size int64 - mtim time.Time + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +// Name returns file's name. +func (s StatT) Name() string { + return s.name } // Size returns file's size. @@ -20,30 +29,15 @@ func (s StatT) Size() int64 { // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) + return s.mode } -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - return fromStatT(&fi) +// ModTime returns file's last modification time. +func (s StatT) ModTime() time.Time { + return s.modTime } -// fromStatT converts a os.FileInfo type to a system.StatT type -func fromStatT(fi *os.FileInfo) (*StatT, error) { - return &StatT{ - size: (*fi).Size(), - mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil +// IsDir returns whether file is actually a directory. +func (s StatT) IsDir() bool { + return s.isDir } diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go index 49dbdd378173..3ae912846844 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -2,12 +2,12 @@ package system -import "golang.org/x/sys/unix" +import "syscall" // Unmount is a platform-specific helper function to call // the unmount syscall. func Unmount(dest string) error { - return unix.Unmount(dest, 0) + return syscall.Unmount(dest, 0) } // CommandLineToArgv should not be used on Unix. diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go index 23e9b207c756..7aaab7e7fbea 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -1,16 +1,15 @@ package system import ( + "syscall" "unsafe" "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" ) var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") ) // OSVersion is a wrapper for Windows version information @@ -42,7 +41,7 @@ type osVersionInfoEx struct { func GetOSVersion() OSVersion { var err error osv := OSVersion{} - osv.Version, err = windows.GetVersion() + osv.Version, err = syscall.GetVersion() if err != nil { // GetVersion never fails. panic(err) @@ -54,8 +53,6 @@ func GetOSVersion() OSVersion { } // IsWindowsClient returns true if the SKU is client -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. 
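GetOSVersion above unpacks the DWORD that kernel32's GetVersion returns: major version in the low byte, minor in the next byte, build number in the high word. The same unpacking over a sample value, as a portable sketch (the packed constant is illustrative, not a real GetVersion result):

package main

import "fmt"

func main() {
	// Packed layout returned by kernel32 GetVersion: build in the high
	// word, minor version in bits 8-15, major version in bits 0-7.
	// 0x3829000A decodes to Windows 10.0 build 14393 (illustrative).
	const v uint32 = 0x3829000A
	major := uint8(v & 0xFF)
	minor := uint8(v >> 8 & 0xFF)
	build := uint16(v >> 16)
	fmt.Printf("windows version %d.%d (build %d)\n", major, minor, build)
}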
func IsWindowsClient() bool { osviex := &osVersionInfoEx{OSVersionInfoSize: 284} r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) @@ -67,22 +64,6 @@ func IsWindowsClient() bool { return osviex.ProductType == verNTWorkstation } -// IsIoTCore returns true if the currently running image is based off of -// Windows 10 IoT Core. -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsIoTCore() bool { - var returnedProductType uint32 - r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) - if r1 == 0 { - logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) - return false - } - const productIoTUAP = 0x0000007B - const productIoTUAPCommercial = 0x00000083 - return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial -} - // Unmount is a platform-specific helper function to call // the unmount syscall. Not supported on Windows func Unmount(dest string) error { @@ -93,20 +74,20 @@ func Unmount(dest string) error { func CommandLineToArgv(commandLine string) ([]string, error) { var argc int32 - argsPtr, err := windows.UTF16PtrFromString(commandLine) + argsPtr, err := syscall.UTF16PtrFromString(commandLine) if err != nil { return nil, err } - argv, err := windows.CommandLineToArgv(argsPtr, &argc) + argv, err := syscall.CommandLineToArgv(argsPtr, &argc) if err != nil { return nil, err } - defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) newArgs := make([]string, argc) for i, v := range (*argv)[:argc] { - newArgs[i] = string(windows.UTF16ToString((*v)[:])) + newArgs[i] = string(syscall.UTF16ToString((*v)[:])) } return newArgs, nil diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go index 5a10eda5afb0..3d0146b01ad4 100644 --- a/vendor/github.com/containers/storage/pkg/system/umask.go +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -3,11 +3,11 @@ package system import ( - "golang.org/x/sys/unix" + "syscall" ) // Umask sets current process's file mode creation mask to newmask // and returns oldmask. func Umask(newmask int) (oldmask int, err error) { - return unix.Umask(newmask), nil + return syscall.Umask(newmask), nil } diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go b/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go new file mode 100644 index 000000000000..0a16197544db --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_darwin.go @@ -0,0 +1,8 @@ +package system + +import "syscall" + +// LUtimesNano is not supported by darwin platform. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go index 6a77524376db..e2eac3b553e0 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -3,20 +3,18 @@ package system import ( "syscall" "unsafe" - - "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. 
+// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { var _path *byte - _path, err := unix.BytePtrFromString(path) + _path, err := syscall.BytePtrFromString(path) if err != nil { return err } - if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { return err } diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go index edc588a63f36..fc8a1aba95cb 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go @@ -3,21 +3,22 @@ package system import ( "syscall" "unsafe" - - "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD + // These are not currently available in syscall + atFdCwd := -100 + atSymLinkNoFollow := 0x100 var _path *byte - _path, err := unix.BytePtrFromString(path) + _path, err := syscall.BytePtrFromString(path) if err != nil { return err } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { return err } diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unix_test.go b/vendor/github.com/containers/storage/pkg/system/utimes_unix_test.go index af6a5442450a..1ee0d099f910 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_unix_test.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unix_test.go @@ -12,7 +12,7 @@ import ( // prepareFiles creates files for testing in the temp directory func prepareFiles(t *testing.T) (string, string, string, string) { - dir, err := ioutil.TempDir("", "storage-system-test") + dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) } @@ -41,7 +41,7 @@ func TestLUtimesNano(t *testing.T) { t.Fatal(err) } - ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}} + ts := []syscall.Timespec{{0, 0}, {0, 0}} if err := LUtimesNano(symlink, ts); err != nil { t.Fatal(err) } diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go index 139714544d02..50c3a04364d2 100644 --- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -1,10 +1,10 @@ -// +build !linux,!freebsd +// +build !linux,!freebsd,!darwin package system import "syscall" -// LUtimesNano is only supported on linux and freebsd. +// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. 
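On Linux the reverted implementation issues utimensat(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) directly, hardcoding the two AT_* values that the syscall package does not export. A self-contained, Linux-only sketch of that call (the symlink path is illustrative):

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// These two values are not exported by the syscall package; they are
// variables rather than constants so the negative one converts cleanly.
var (
	atFdCwd           = -100  // AT_FDCWD: resolve path relative to CWD
	atSymlinkNoFollow = 0x100 // AT_SYMLINK_NOFOLLOW: act on the link itself
)

// lutimesNano sets atime/mtime on the symlink itself, not its target.
func lutimesNano(path string, ts [2]syscall.Timespec) error {
	p, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}
	_, _, errno := syscall.Syscall6(syscall.SYS_UTIMENSAT,
		uintptr(atFdCwd), uintptr(unsafe.Pointer(p)),
		uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymlinkNoFollow), 0, 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	ts := [2]syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}}
	if err := lutimesNano("/tmp/some-symlink", ts); err != nil {
		fmt.Println("lutimes failed:", err)
	}
}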
func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 98b111be426e..d2e2c0579983 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -1,29 +1,63 @@ package system -import "golang.org/x/sys/unix" +import ( + "syscall" + "unsafe" +) // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It will return a nil slice and nil error if the xattr is not set. func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - if errno == unix.ENODATA { + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { return nil, nil } - if errno == unix.ERANGE { + if errno == syscall.ERANGE { dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) } - if errno != nil { + if errno != 0 { return nil, errno } return dest[:sz], nil } +var _zero uintptr + // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. func Lsetxattr(path string, attr string, data []byte, flags int) error { - return unix.Lsetxattr(path, attr, data, flags) + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil } diff --git a/vendor/github.com/containers/storage/pkg/testutil/assert/assert.go b/vendor/github.com/containers/storage/pkg/testutil/assert/assert.go new file mode 100644 index 000000000000..5b0dcce67aff --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/testutil/assert/assert.go @@ -0,0 +1,70 @@ +// Package assert contains functions for making assertions in unit tests +package assert + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" +) + +// TestingT is an interface which defines the methods of testing.T that are +// required by this package +type TestingT interface { + Fatalf(string, ...interface{}) +} + +// Equal compares the actual value to the expected value and fails the test if +// they are not equal.
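The helpers in this new testutil package are a small vendored replacement for an external assertion library; failures are reported through the TestingT interface, and fatal attributes them to the caller's file:line via runtime.Caller. A hedged example of how a test might consume them (the test file itself is hypothetical; the import path is the one vendored above):

package example_test

import (
	"errors"
	"testing"

	"github.com/containers/storage/pkg/testutil/assert"
)

func TestAssertHelpers(t *testing.T) {
	// Equal fails the test with "Expected ... got ..." on mismatch;
	// note the argument order is (t, actual, expected).
	assert.Equal(t, 2+2, 4)

	// Error checks both that an error occurred and that its text
	// contains the given substring.
	err := errors.New("layer is in use by a container")
	assert.Error(t, err, "in use")

	// NilError is the happy-path counterpart.
	assert.NilError(t, nil)
}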
+func Equal(t TestingT, actual, expected interface{}) { + if expected != actual { + fatal(t, fmt.Sprintf("Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual)) + } +} + +//EqualStringSlice compares two slices and fails the test if they do not contain +// the same items. +func EqualStringSlice(t TestingT, actual, expected []string) { + if len(actual) != len(expected) { + t.Fatalf("Expected (length %d): %q\nActual (length %d): %q", + len(expected), expected, len(actual), actual) + } + for i, item := range actual { + if item != expected[i] { + t.Fatalf("Slices differ at element %d, expected %q got %q", + i, expected[i], item) + } + } +} + +// NilError asserts that the error is nil, otherwise it fails the test. +func NilError(t TestingT, err error) { + if err != nil { + fatal(t, fmt.Sprintf("Expected no error, got: %s", err.Error())) + } +} + +// Error asserts that error is not nil, and contains the expected text, +// otherwise it fails the test. +func Error(t TestingT, err error, contains string) { + if err == nil { + fatal(t, "Expected an error, but error was nil") + } + + if !strings.Contains(err.Error(), contains) { + fatal(t, fmt.Sprintf("Expected error to contain '%s', got '%s'", contains, err.Error())) + } +} + +// Contains asserts that the string contains a substring, otherwise it fails the +// test. +func Contains(t TestingT, actual, contains string) { + if !strings.Contains(actual, contains) { + fatal(t, fmt.Sprintf("Expected '%s' to contain '%s'", actual, contains)) + } +} + +func fatal(t TestingT, msg string) { + _, file, line, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), line, msg) +} diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index c90b45c7fded..519a0abbf01f 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -1,9 +1,8 @@ -# See man 5 containers-storage.conf for more information # The "container storage" table contains all of the server options. [storage] # Default Storage Driver -driver = "" +driver = "overlay" # Temporary storage location runroot = "/var/run/containers/storage" @@ -16,10 +15,3 @@ graphroot = "/var/lib/containers/storage" # Must be comma separated list. additionalimagestores = [ ] - -# Size is used to set a maximum size of the container image. Only supported by -# certain container storage drivers. -size = "" - -# OverrideKernelCheck tells the driver to ignore kernel checks based on kernel version -override_kernel_check = "false" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index f2cd0e2eae78..43efa9408566 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -20,11 +20,41 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) var ( + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = errors.New("error loading storage metadata") + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = errors.New("that ID is already in use") + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. 
+ ErrDuplicateName = errors.New("that name is already in use") + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. + ErrParentIsContainer = errors.New("would-be parent layer is a container") + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = errors.New("identifier is not a container") + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = errors.New("identifier is not an image") + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = errors.New("identifier is not a layer") + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = errors.New("identifier is not a layer, image, or container") + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = errors.New("layer has children") + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. + ErrLayerUsedByImage = errors.New("layer is in use by an image") + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = errors.New("layer is in use by a container") + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = errors.New("image is in use by a container") + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. + ErrIncompleteOptions = errors.New("missing necessary StoreOptions") + // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. + ErrSizeUnknown = errors.New("size is not known") + // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. + ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // DefaultStoreOptions is a reasonable default set of options. DefaultStoreOptions StoreOptions stores []*store @@ -87,10 +117,6 @@ type ROBigDataStore interface { // data associated with this ID, if it has previously been set. BigDataSize(id, key string) (int64, error) - // BigDataDigest retrieves the digest of a (potentially large) piece of - // data associated with this ID, if it has previously been set. - BigDataDigest(id, key string) (digest.Digest, error) - // BigDataNames() returns a list of the names of previously-stored pieces of // data. BigDataNames(id string) ([]string, error) @@ -157,6 +183,31 @@ type Store interface { // by the Store. GraphDriver() (drivers.Driver, error) + // LayerStore obtains and returns a handle to the writeable layer store + // object used by the Store. Accessing this store directly will bypass + // locking and synchronization, so use it with care. + LayerStore() (LayerStore, error) + + // ROLayerStore obtains additional read/only layer store objects used + // by the Store. Accessing these stores directly will bypass locking + // and synchronization, so use them with care. + ROLayerStores() ([]ROLayerStore, error) + + // ImageStore obtains and returns a handle to the writable image store + // object used by the Store. 
Accessing this store directly will bypass + // locking and synchronization, so use it with care. + ImageStore() (ImageStore, error) + + // ROImageStores obtains additional read/only image store objects used + // by the Store. Accessing these stores directly will bypass locking + // and synchronization, so use them with care. + ROImageStores() ([]ROImageStore, error) + + // ContainerStore obtains and returns a handle to the container store + // object used by the Store. Accessing this store directly will bypass + // locking and synchronization, so use it with care. + ContainerStore() (ContainerStore, error) + // CreateLayer creates a new layer in the underlying storage driver, // optionally having the specified ID (one will be assigned if none is // specified), with the specified layer (or no layer) as its parent, @@ -174,7 +225,7 @@ type Store interface { // if reexec.Init { // return // } - PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) + PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) // CreateImage creates a new image, optionally with the specified ID // (one will be assigned if none is specified), with optional names, @@ -289,7 +340,7 @@ type Store interface { // if reexec.Init { // return // } - ApplyDiff(to string, diff io.Reader) (int64, error) + ApplyDiff(to string, diff archive.Reader) (int64, error) // LayersByCompressedDigest returns a slice of the layers with the // specified compressed digest value recorded for them. @@ -316,7 +367,6 @@ type Store interface { Names(id string) ([]string, error) // SetNames changes the list of names for a layer, image, or container. - // Duplicate names are removed from the list automatically. SetNames(id string, names []string) error // ListImageBigData retrieves a list of the (possibly large) chunks of @@ -331,10 +381,6 @@ type Store interface { // of named data associated with an image. ImageBigDataSize(id, key string) (int64, error) - // ImageBigDataDigest retrieves the digest of a (possibly large) chunk - // of named data associated with an image. - ImageBigDataDigest(id, key string) (digest.Digest, error) - // SetImageBigData stores a (possibly large) chunk of named data associated // with an image. SetImageBigData(id, key string, data []byte) error @@ -351,10 +397,6 @@ type Store interface { // chunk of named data associated with a container. ContainerBigDataSize(id, key string) (int64, error) - // ContainerBigDataDigest retrieves the digest of a (possibly large) - // chunk of named data associated with a container. - ContainerBigDataDigest(id, key string) (digest.Digest, error) - // SetContainerBigData stores a (possibly large) chunk of named data // associated with a container. SetContainerBigData(id, key string, data []byte) error @@ -370,10 +412,6 @@ type Store interface { // and may have different metadata, big data items, and flags. ImagesByTopLayer(id string) ([]*Image, error) - // ImagesByDigest returns a list of images which contain a big data item - // named ImageDigestBigDataKey whose contents have the specified digest. - ImagesByDigest(d digest.Digest) ([]*Image, error) - // Container returns a specific container. 
Container(id string) (*Container, error) @@ -491,6 +529,11 @@ func GetStore(options StoreOptions) (Store, error) { if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } + for _, subdir := range []string{} { + if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + } if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } @@ -601,13 +644,7 @@ func (s *store) getGraphDriver() (drivers.Driver, error) { if s.graphDriver != nil { return s.graphDriver, nil } - config := drivers.Options{ - Root: s.graphRoot, - DriverOptions: s.graphOptions, - UIDMaps: s.uidMap, - GIDMaps: s.gidMap, - } - driver, err := drivers.New(s.graphDriverName, config) + driver, err := drivers.New(s.graphRoot, s.graphDriverName, s.graphOptions, s.uidMap, s.gidMap) if err != nil { return nil, err } @@ -627,9 +664,6 @@ func (s *store) GraphDriver() (drivers.Driver, error) { return s.getGraphDriver() } -// LayerStore obtains and returns a handle to the writeable layer store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. func (s *store) LayerStore() (LayerStore, error) { s.graphLock.Lock() defer s.graphLock.Unlock() @@ -662,9 +696,6 @@ func (s *store) LayerStore() (LayerStore, error) { return s.layerStore, nil } -// ROLayerStores obtains additional read/only layer store objects used by the -// Store. Accessing these stores directly will bypass locking and -// synchronization, so it is not part of the exported Store interface. func (s *store) ROLayerStores() ([]ROLayerStore, error) { s.graphLock.Lock() defer s.graphLock.Unlock() @@ -691,9 +722,6 @@ func (s *store) ROLayerStores() ([]ROLayerStore, error) { return s.roLayerStores, nil } -// ImageStore obtains and returns a handle to the writable image store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. func (s *store) ImageStore() (ImageStore, error) { if s.imageStore != nil { return s.imageStore, nil @@ -701,9 +729,6 @@ func (s *store) ImageStore() (ImageStore, error) { return nil, ErrLoadError } -// ROImageStores obtains additional read/only image store objects used by the -// Store. Accessing these stores directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. func (s *store) ROImageStores() ([]ROImageStore, error) { if len(s.roImageStores) != 0 { return s.roImageStores, nil @@ -724,9 +749,6 @@ func (s *store) ROImageStores() ([]ROImageStore, error) { return s.roImageStores, nil } -// ContainerStore obtains and returns a handle to the container store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. 
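Nearly every accessor in the remainder of this file follows one pattern, which this revert rewrites wholesale: build a list of the writable store plus the read-only stores, then for each one lock it, reload it if another process modified it on disk, and try the lookup, returning on the first hit. A toy sketch of that loop over a hypothetical minimal interface (none of these names are the vendored API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errUnknown = errors.New("identifier not found in any store")

// roStore is a stripped-down stand-in for ROLayerStore/ROImageStore.
type roStore interface {
	Lock()
	Unlock()
	Modified() (bool, error)
	Load()
	Exists(id string) bool
	Metadata(id string) (string, error)
}

// metadata searches the writable store first, then the read-only stores,
// reloading any store whose on-disk state changed under us and deferring
// the unlocks to function exit, as the vendored code does.
func metadata(rw roStore, ro []roStore, id string) (string, error) {
	for _, s := range append([]roStore{rw}, ro...) {
		s.Lock()
		defer s.Unlock()
		if modified, err := s.Modified(); modified || err != nil {
			s.Load() // pick up changes written by other processes
		}
		if s.Exists(id) {
			return s.Metadata(id)
		}
	}
	return "", errUnknown
}

// memStore is an in-memory roStore so the sketch runs standalone.
type memStore struct {
	mu   sync.Mutex
	data map[string]string
}

func (m *memStore) Lock()                               { m.mu.Lock() }
func (m *memStore) Unlock()                             { m.mu.Unlock() }
func (m *memStore) Modified() (bool, error)             { return false, nil }
func (m *memStore) Load()                               {}
func (m *memStore) Exists(id string) bool               { _, ok := m.data[id]; return ok }
func (m *memStore) Metadata(id string) (string, error)  { return m.data[id], nil }

func main() {
	rw := &memStore{data: map[string]string{"abc": "writable"}}
	ro := &memStore{data: map[string]string{"def": "read-only"}}
	for _, id := range []string{"abc", "def", "zzz"} {
		md, err := metadata(rw, []roStore{ro}, id)
		fmt.Println(id, md, err)
	}
}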
func (s *store) ContainerStore() (ContainerStore, error) { if s.containerStore != nil { return s.containerStore, nil @@ -734,19 +756,16 @@ func (s *store) ContainerStore() (ContainerStore, error) { return nil, ErrLoadError } -func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) { +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) { rlstore, err := s.LayerStore() if err != nil { return nil, -1, err } - rlstores, err := s.ROLayerStores() - if err != nil { - return nil, -1, err - } rcstore, err := s.ContainerStore() if err != nil { return nil, -1, err } + rlstore.Lock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { @@ -761,15 +780,9 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w id = stringid.GenerateRandomID() } if parent != "" { - var ilayer *Layer - for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { - if l, err := lstore.Get(parent); err == nil && l != nil { - ilayer = l - parent = ilayer.ID - break - } - } - if ilayer == nil { + if l, err := rlstore.Get(parent); err == nil && l != nil { + parent = l.ID + } else { return nil, -1, ErrLayerUnknown } containers, err := rcstore.Containers() @@ -795,32 +808,31 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o id = stringid.GenerateRandomID() } - if layer != "" { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - var ilayer *Layer - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - ilayer, err = store.Get(layer) - if err == nil { - break - } + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + stores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + stores = append([]ROLayerStore{rlstore}, stores...) + var ilayer *Layer + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if ilayer == nil { - return nil, ErrLayerUnknown + ilayer, err = rlstore.Get(layer) + if err == nil { + break } - layer = ilayer.ID } + if ilayer == nil { + return nil, ErrLayerUnknown + } + layer = ilayer.ID ristore, err := s.ImageStore() if err != nil { @@ -857,22 +869,23 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat imageTopLayer := "" imageID := "" if image != "" { - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return nil, err } - istores, err := s.ROImageStores() + stores, err := s.ROImageStores() if err != nil { return nil, err } + stores = append([]ROImageStore{ristore}, stores...) var cimage *Image - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - cimage, err = store.Get(image) + cimage, err = ristore.Get(image) if err == nil { break } @@ -947,22 +960,23 @@ func (s *store) SetMetadata(id, metadata string) error { } func (s *store) Metadata(id string) (string, error) { - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return "", err } - lstores, err := s.ROLayerStores() + stores, err := s.ROLayerStores() if err != nil { return "", err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROLayerStore{rlstore}, stores...) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if store.Exists(id) { - return store.Metadata(id) + if rlstore.Exists(id) { + return rlstore.Metadata(id) } } @@ -974,48 +988,50 @@ func (s *store) Metadata(id string) (string, error) { if err != nil { return "", err } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + istores = append([]ROImageStore{istore}, istores...) + for _, ristore := range istores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - if store.Exists(id) { - return store.Metadata(id) + if ristore.Exists(id) { + return ristore.Metadata(id) } } - cstore, err := s.ContainerStore() + rcstore, err := s.ContainerStore() if err != nil { return "", err } - cstore.Lock() - defer cstore.Unlock() - if modified, err := cstore.Modified(); modified || err != nil { - cstore.Load() + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() } - if cstore.Exists(id) { - return cstore.Metadata(id) + if rcstore.Exists(id) { + return rcstore.Metadata(id) } return "", ErrNotAnID } func (s *store) ListImageBigData(id string) ([]string, error) { - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return nil, err } - istores, err := s.ROImageStores() + stores, err := s.ROImageStores() if err != nil { return nil, err } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROImageStore{ristore}, stores...) 
+ for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - bigDataNames, err := store.BigDataNames(id) + bigDataNames, err := ristore.BigDataNames(id) if err == nil { return bigDataNames, err } @@ -1024,21 +1040,22 @@ func (s *store) ListImageBigData(id string) ([]string, error) { } func (s *store) ImageBigDataSize(id, key string) (int64, error) { - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return -1, err } - istores, err := s.ROImageStores() + stores, err := s.ROImageStores() if err != nil { return -1, err } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - size, err := store.BigDataSize(id, key) + size, err := ristore.BigDataSize(id, key) if err == nil { return size, nil } @@ -1046,14 +1063,14 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) { return -1, ErrSizeUnknown } -func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { +func (s *store) ImageBigData(id, key string) ([]byte, error) { ristore, err := s.ImageStore() if err != nil { - return "", err + return nil, err } stores, err := s.ROImageStores() if err != nil { - return "", err + return nil, err } stores = append([]ROImageStore{ristore}, stores...) for _, ristore := range stores { @@ -1062,34 +1079,12 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { if modified, err := ristore.Modified(); modified || err != nil { ristore.Load() } - d, err := ristore.BigDataDigest(id, key) - if err == nil && d.Validate() == nil { - return d, nil - } - } - return "", ErrDigestUnknown -} - -func (s *store) ImageBigData(id, key string) ([]byte, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - data, err := store.BigData(id, key) + data, err := ristore.BigData(id, key) if err == nil { return data, nil } } + return nil, ErrImageUnknown } @@ -1133,20 +1128,8 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } - return rcstore.BigDataSize(id, key) -} -func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - return rcstore.BigDataDigest(id, key) + return rcstore.BigDataSize(id, key) } func (s *store) ContainerBigData(id, key string) ([]byte, error) { @@ -1159,6 +1142,7 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } + return rcstore.BigData(id, key) } @@ -1172,6 +1156,7 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error { if modified, err := rcstore.Modified(); modified || err != nil { rcstore.Load() } + return rcstore.SetBigData(id, key, data) } @@ -1184,32 +1169,34 @@ func (s *store) Exists(id string) bool { if err != nil { return false } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + lstores = append([]ROLayerStore{lstore}, lstores...) + for _, rlstore := range lstores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if store.Exists(id) { + if rlstore.Exists(id) { return true } } - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return false } - istores, err := s.ROImageStores() + stores, err := s.ROImageStores() if err != nil { return false } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROImageStore{ristore}, stores...) 
+ for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - if store.Exists(id) { + if ristore.Exists(id) { return true } } @@ -1230,20 +1217,15 @@ func (s *store) Exists(id string) bool { return false } -func dedupeNames(names []string) []string { +func (s *store) SetNames(id string, names []string) error { + deduped := []string{} seen := make(map[string]bool) - deduped := make([]string, 0, len(names)) for _, name := range names { if _, wasSeen := seen[name]; !wasSeen { seen[name] = true deduped = append(deduped, name) } } - return deduped -} - -func (s *store) SetNames(id string, names []string) error { - deduped := dedupeNames(names) rlstore, err := s.LayerStore() if err != nil { @@ -1287,40 +1269,42 @@ func (s *store) SetNames(id string, names []string) error { } func (s *store) Names(id string) ([]string, error) { - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return nil, err } - lstores, err := s.ROLayerStores() + stores, err := s.ROLayerStores() if err != nil { return nil, err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROLayerStore{rlstore}, stores...) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if l, err := store.Get(id); l != nil && err == nil { + if l, err := rlstore.Get(id); l != nil && err == nil { return l.Names, nil } } - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return nil, err } - istores, err := s.ROImageStores() + ristores, err := s.ROImageStores() if err != nil { return nil, err } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + ristores = append([]ROImageStore{ristore}, ristores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - if i, err := store.Get(id); i != nil && err == nil { + if i, err := ristore.Get(id); i != nil && err == nil { return i.Names, nil } } @@ -1341,57 +1325,44 @@ func (s *store) Names(id string) ([]string, error) { } func (s *store) Lookup(name string) (string, error) { - lstore, err := s.LayerStore() + rcstore, err := s.ContainerStore() if err != nil { return "", err } - lstores, err := s.ROLayerStores() + ristore, err := s.ImageStore() if err != nil { return "", err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if l, err := store.Get(name); l != nil && err == nil { - return l.ID, nil - } - } - - istore, err := s.ImageStore() + rlstore, err := s.LayerStore() if err != nil { return "", err } - istores, err := s.ROImageStores() - if err != nil { - return "", err + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if i, err := store.Get(name); i != nil && err == nil { - return i.ID, nil - } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() } - cstore, err := s.ContainerStore() - if err != nil { - return "", err + if l, err := rlstore.Get(name); l != nil && err == nil { + return l.ID, nil } - cstore.Lock() - defer cstore.Unlock() - if modified, err := cstore.Modified(); modified || err != nil { - cstore.Load() + if i, err := ristore.Get(name); i != nil && err == nil { + return i.ID, nil } - if c, err := cstore.Get(name); c != nil && err == nil { + if c, err := rcstore.Get(name); c != nil && err == nil { return c.ID, nil } - return "", ErrLayerUnknown } @@ -1787,72 +1758,75 @@ func (s *store) Unmount(id string) error { } func (s *store) Changes(from, to string) ([]archive.Change, error) { - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return nil, err } - lstores, err := s.ROLayerStores() + stores, err := s.ROLayerStores() if err != nil { return nil, err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROLayerStore{rlstore}, stores...) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if store.Exists(to) { - return store.Changes(from, to) + if rlstore.Exists(to) { + return rlstore.Changes(from, to) } } return nil, ErrLayerUnknown } func (s *store) DiffSize(from, to string) (int64, error) { - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return -1, err } - lstores, err := s.ROLayerStores() + stores, err := s.ROLayerStores() if err != nil { return -1, err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROLayerStore{rlstore}, stores...) + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if store.Exists(to) { - return store.DiffSize(from, to) + if rlstore.Exists(to) { + return rlstore.DiffSize(from, to) } } return -1, ErrLayerUnknown } func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return nil, err } - lstores, err := s.ROLayerStores() + stores, err := s.ROLayerStores() if err != nil { return nil, err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROLayerStore{rlstore}, stores...) 
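Diff's store loop continues below with the same shape as Changes and DiffSize above: the writable layer store is consulted first, then each read-only store, and the first store that knows the "to" layer answers; ErrLayerUnknown falls out only when none of them does. A hedged usage sketch (s is a store opened elsewhere; archive.Change's String() renders entries like the Add "/layer1file1" lines asserted by the deleted tests further down):

    changes, err := s.Changes(lowerLayerID, upperLayerID)
    if err != nil {
    	return err // ErrLayerUnknown: no store has the "to" layer
    }
    for _, change := range changes {
    	fmt.Println(change.String()) // e.g. Add "/layer1file1"
    }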
+ for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if store.Exists(to) { - return store.Diff(from, to, options) + if rlstore.Exists(to) { + return rlstore.Diff(from, to, options) } } return nil, ErrLayerUnknown } -func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { +func (s *store) ApplyDiff(to string, diff archive.Reader) (int64, error) { rlstore, err := s.LayerStore() if err != nil { return -1, err @@ -1870,47 +1844,37 @@ func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { var layers []Layer - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return nil, err } - lstores, err := s.ROLayerStores() + stores, err := s.ROLayerStores() if err != nil { return nil, err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROLayerStore{rlstore}, stores...) + + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - storeLayers, err := m(store, d) + slayers, err := m(rlstore, d) if err != nil { - if errors.Cause(err) != ErrLayerUnknown { - return nil, err - } - continue + return nil, err } - layers = append(layers, storeLayers...) - } - if len(layers) == 0 { - return nil, ErrLayerUnknown + layers = append(layers, slayers...) } return layers, nil } func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { - if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) - } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) } func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { - if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) - } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } @@ -1923,14 +1887,15 @@ func (s *store) LayerSize(id string) (int64, error) { if err != nil { return -1, err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + lstores = append([]ROLayerStore{lstore}, lstores...) + for _, rlstore := range lstores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - if store.Exists(id) { - return store.Size(id) + if rlstore.Exists(id) { + return rlstore.Size(id) } } return -1, ErrLayerUnknown @@ -1938,53 +1903,55 @@ func (s *store) LayerSize(id string) (int64, error) { func (s *store) Layers() ([]Layer, error) { var layers []Layer - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return nil, err } - lstores, err := s.ROLayerStores() + stores, err := s.ROLayerStores() if err != nil { return nil, err } + stores = append([]ROLayerStore{rlstore}, stores...) - for _, store := range append([]ROLayerStore{lstore}, lstores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - storeLayers, err := store.Layers() + slayers, err := rlstore.Layers() if err != nil { return nil, err } - layers = append(layers, storeLayers...) + layers = append(layers, slayers...) } return layers, nil } func (s *store) Images() ([]Image, error) { var images []Image - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return nil, err } - istores, err := s.ROImageStores() + stores, err := s.ROImageStores() if err != nil { return nil, err } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - storeImages, err := store.Images() + simages, err := ristore.Images() if err != nil { return nil, err } - images = append(images, storeImages...) + images = append(images, simages...) } return images, nil } @@ -2005,21 +1972,24 @@ func (s *store) Containers() ([]Container, error) { } func (s *store) Layer(id string) (*Layer, error) { - lstore, err := s.LayerStore() + rlstore, err := s.LayerStore() if err != nil { return nil, err } - lstores, err := s.ROLayerStores() + + stores, err := s.ROLayerStores() if err != nil { return nil, err } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROLayerStore{rlstore}, stores...) + + for _, rlstore := range stores { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() } - layer, err := store.Get(id) + layer, err := rlstore.Get(id) if err == nil { return layer, nil } @@ -2028,21 +1998,22 @@ func (s *store) Layer(id string) (*Layer, error) { } func (s *store) Image(id string) (*Image, error) { - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return nil, err } - istores, err := s.ROImageStores() + stores, err := s.ROImageStores() if err != nil { return nil, err } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - image, err := store.Get(id) + image, err := ristore.Get(id) if err == nil { return image, nil } @@ -2057,22 +2028,23 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return nil, err } - istore, err := s.ImageStore() + ristore, err := s.ImageStore() if err != nil { return nil, err } - istores, err := s.ROImageStores() + stores, err := s.ROImageStores() if err != nil { return nil, err } - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() + stores = append([]ROImageStore{ristore}, stores...) + for _, ristore := range stores { + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() } - imageList, err := store.Images() + imageList, err := ristore.Images() if err != nil { return nil, err } @@ -2085,33 +2057,6 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return images, nil } -func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { - images := []*Image{} - - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - imageList, err := store.ByDigest(d) - if err != nil && err != ErrImageUnknown { - return nil, err - } - images = append(images, imageList...) - } - return images, nil -} - func (s *store) Container(id string) (*Container, error) { rcstore, err := s.ContainerStore() if err != nil { @@ -2331,7 +2276,7 @@ func makeBigDataBaseName(key string) string { } func stringSliceWithoutValue(slice []string, value string) []string { - modified := make([]string, 0, len(slice)) + modified := []string{} for _, v := range slice { if v == value { continue @@ -2349,12 +2294,6 @@ type OptionsConfig struct { // Image stores. Usually used to access Networked File System // for shared image content AdditionalImageStores []string `toml:"additionalimagestores"` - - // Size - Size string `toml:"size"` - - // OverrideKernelCheck - OverrideKernelCheck string `toml:"override_kernel_check"` } // TOML-friendly explicit tables used for conversions. @@ -2370,7 +2309,7 @@ type tomlConfig struct { func init() { DefaultStoreOptions.RunRoot = "/var/run/containers/storage" DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" - DefaultStoreOptions.GraphDriverName = "" + DefaultStoreOptions.GraphDriverName = "overlay" data, err := ioutil.ReadFile(configFile) if err != nil { @@ -2398,12 +2337,7 @@ func init() { for _, s := range config.Storage.Options.AdditionalImageStores { DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) } - if config.Storage.Options.Size != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) - } - if config.Storage.Options.OverrideKernelCheck != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck)) - } + if os.Getenv("STORAGE_DRIVER") != "" { DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") } diff --git a/vendor/github.com/containers/storage/tests/apply-diff.bats b/vendor/github.com/containers/storage/tests/apply-diff.bats deleted file mode 100644 index 0f4351dd1dde..000000000000 --- a/vendor/github.com/containers/storage/tests/apply-diff.bats +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "applydiff" { - # The checkdiffs function needs "tar". 
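Before the deleted test files below get going, one note on the init() hunk above: with Size and OverrideKernelCheck dropped from OptionsConfig, a parsed config now contributes only the driver and the additionalimagestores entries, each of which becomes a "<driver>.imagestore=<path>" graph-driver option; the compiled-in driver default becomes "overlay", and the STORAGE_DRIVER environment variable still overrides everything. A hypothetical storage.conf the reverted struct can still consume (the path is an example only, and the [storage]/[storage.options] table names are assumed to match the struct's toml tags):

    [storage]
    driver = "overlay"

    [storage.options]
    additionalimagestores = [
      "/mnt/shared/containers/storage",
    ]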
- if test -z "$(which tar 2> /dev/null)" ; then - skip "need tar" - fi - - # Create and populate three interesting layers. - populate - - # Extract the layers. - storage diff -u -f $TESTDIR/lower.tar $lowerlayer - storage diff -c -f $TESTDIR/middle.tar $midlayer - storage diff -u -f $TESTDIR/upper.tar $upperlayer - - # Delete the layers. - storage delete-layer $upperlayer - storage delete-layer $midlayer - storage delete-layer $lowerlayer - - # Create new layers and populate them using the layer diffs. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowerlayer="$output" - storage applydiff -f $TESTDIR/lower.tar "$lowerlayer" - - run storage --debug=false create-layer "$lowerlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - midlayer="$output" - storage applydiff -f $TESTDIR/middle.tar "$midlayer" - - run storage --debug=false create-layer "$midlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - upperlayer="$output" - storage applydiff -f $TESTDIR/upper.tar "$upperlayer" - - # The contents of these new layers should match what the old ones had. - checkchanges - checkdiffs -} diff --git a/vendor/github.com/containers/storage/tests/bigdata.bats b/vendor/github.com/containers/storage/tests/bigdata.bats deleted file mode 100644 index e0c05a878d6b..000000000000 --- a/vendor/github.com/containers/storage/tests/bigdata.bats +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "image-data" { - # Bail if "sha256sum" isn't available. - if test -z "$(which sha256sum 2> /dev/null)" ; then - skip "need sha256sum" - fi - - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Make sure the image can be located. - storage exists -i $image - - # Make sure the image has no big data items associated with it. - run storage --debug=false list-image-data $image - [ "$status" -eq 0 ] - [ "$output" = "" ] - - # Create two random files. - createrandom $TESTDIR/big-item-1 1234 - createrandom $TESTDIR/big-item-2 5678 - - # Set each of those files as a big data item named after the file. - storage set-image-data -f $TESTDIR/big-item-1 $image big-item-1 - storage set-image-data -f $TESTDIR/big-item-2 $image big-item-2 - - # Get a list of the items. Make sure they're both listed. - run storagewithsorting --debug=false list-image-data $image - [ "$status" -eq 0 ] - [ "${#lines[*]}" -eq 2 ] - [ "${lines[0]}" = "big-item-1" ] - [ "${lines[1]}" = "big-item-2" ] - - # Check that the recorded sizes of the items match what we decided above. - run storage get-image-data-size $image no-such-item - [ "$status" -ne 0 ] - run storage --debug=false get-image-data-size $image big-item-1 - [ "$status" -eq 0 ] - [ "$output" -eq 1234 ] - run storage --debug=false get-image-data-size $image big-item-2 - [ "$status" -eq 0 ] - [ "$output" -eq 5678 ] - - # Save the contents of the big data items to disk and compare them with the originals. - run storage --debug=false get-image-data $image no-such-item - [ "$status" -ne 0 ] - storage get-image-data -f $TESTDIR/big-item-1.2 $image big-item-1 - cmp $TESTDIR/big-item-1 $TESTDIR/big-item-1.2 - storage get-image-data -f $TESTDIR/big-item-2.2 $image big-item-2 - cmp $TESTDIR/big-item-2 $TESTDIR/big-item-2.2 - - # Read the recorded digests of the items and compare them with the digests of the originals. 
- run storage get-image-data-digest $image no-such-item - [ "$status" -ne 0 ] - run storage --debug=false get-image-data-digest $image big-item-1 - [ "$status" -eq 0 ] - sum=$(sha256sum $TESTDIR/big-item-1) - sum=sha256:"${sum%% *}" - echo output:"$output": - echo sum:"$sum": - [ "$output" = "$sum" ] - run storage --debug=false get-image-data-digest $image big-item-2 - [ "$status" -eq 0 ] - sum=$(sha256sum $TESTDIR/big-item-2) - sum=sha256:"${sum%% *}" - echo output:"$output": - echo sum:"$sum": - [ "$output" = "$sum" ] -} - -@test "container-data" { - # Bail if "sha256sum" isn't available. - if test -z "$(which sha256sum 2> /dev/null)" ; then - skip "need sha256sum" - fi - - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Create a container based on that image. - run storage --debug=false create-container $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - container=${output%% *} - - # Make sure the container can be located. - storage exists -c $container - - # Make sure the container has no big data items associated with it. - run storage --debug=false list-container-data $container - [ "$status" -eq 0 ] - [ "$output" = "" ] - - # Create two random files. - createrandom $TESTDIR/big-item-1 1234 - createrandom $TESTDIR/big-item-2 5678 - - # Set each of those files as a big data item named after the file. - storage set-container-data -f $TESTDIR/big-item-1 $container big-item-1 - storage set-container-data -f $TESTDIR/big-item-2 $container big-item-2 - - # Get a list of the items. Make sure they're both listed. - run storage --debug=false list-container-data $container - [ "$status" -eq 0 ] - [ "${#lines[*]}" -eq 2 ] - [ "${lines[0]}" = "big-item-1" ] - [ "${lines[1]}" = "big-item-2" ] - - # Check that the recorded sizes of the items match what we decided above. - run storage get-container-data-size $container no-such-item - [ "$status" -ne 0 ] - run storage --debug=false get-container-data-size $container big-item-1 - [ "$status" -eq 0 ] - [ "$output" -eq 1234 ] - run storage --debug=false get-container-data-size $container big-item-2 - [ "$status" -eq 0 ] - [ "$output" -eq 5678 ] - - # Save the contents of the big data items to disk and compare them with the originals. - run storage --debug=false get-container-data $container no-such-item - [ "$status" -ne 0 ] - storage get-container-data -f $TESTDIR/big-item-1.2 $container big-item-1 - cmp $TESTDIR/big-item-1 $TESTDIR/big-item-1.2 - storage get-container-data -f $TESTDIR/big-item-2.2 $container big-item-2 - cmp $TESTDIR/big-item-2 $TESTDIR/big-item-2.2 - - # Read the recorded digests of the items and compare them with the digests of the originals. 
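Both deleted bigdata tests compare the CLI's digest output against sha256:<hex> computed with sha256sum; note that the container-side call they exercise, ContainerBigDataDigest, is itself removed by the store.go hunk near the top of this diff. For reference, the tests' sha256:"${sum%% *}" construction corresponds to the following self-contained sketch:

    package main

    import (
    	"crypto/sha256"
    	"encoding/hex"
    	"fmt"
    )

    // expectedDigest yields the "sha256:<hex>" form the tests assert.
    func expectedDigest(data []byte) string {
    	sum := sha256.Sum256(data)
    	return "sha256:" + hex.EncodeToString(sum[:])
    }

    func main() {
    	fmt.Println(expectedDigest([]byte("example")))
    }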
- run storage get-container-data-digest $container no-such-item - [ "$status" -ne 0 ] - run storage --debug=false get-container-data-digest $container big-item-1 - [ "$status" -eq 0 ] - sum=$(sha256sum $TESTDIR/big-item-1) - sum=sha256:"${sum%% *}" - echo output:"$output": - echo sum:"$sum": - [ "$output" = "$sum" ] - run storage --debug=false get-container-data-digest $container big-item-2 - [ "$status" -eq 0 ] - sum=$(sha256sum $TESTDIR/big-item-2) - sum=sha256:"${sum%% *}" - echo output:"$output": - echo sum:"$sum": - [ "$output" = "$sum" ] -} diff --git a/vendor/github.com/containers/storage/tests/changes.bats b/vendor/github.com/containers/storage/tests/changes.bats deleted file mode 100644 index 85643ca0bd1b..000000000000 --- a/vendor/github.com/containers/storage/tests/changes.bats +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "changes" { - # Create and populate three interesting layers. - populate - - # Mount the layers. - run storage --debug=false mount "$lowerlayer" - [ "$status" -eq 0 ] - lowermount="$output" - run storage --debug=false mount "$midlayer" - [ "$status" -eq 0 ] - midmount="$output" - run storage --debug=false mount "$upperlayer" - [ "$status" -eq 0 ] - uppermount="$output" - - # Check the "changes" output. - checkchanges - - # Unmount the layers. - storage unmount $lowerlayer - storage unmount $midlayer - storage unmount $upperlayer - - # Now check the "changes" again. - checkchanges -} diff --git a/vendor/github.com/containers/storage/tests/container-dirs.bats b/vendor/github.com/containers/storage/tests/container-dirs.bats deleted file mode 100644 index 2cbc237b4fa3..000000000000 --- a/vendor/github.com/containers/storage/tests/container-dirs.bats +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "container-dirs" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Check that the layer can be found. - storage exists -l $layer - - # Create an image using the layer. - run storage --debug=false create-image -m danger $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Check that the image can be found. - storage exists -i $image - - # Create a container based on the layer. - run storage --debug=false create-container $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - container=${output%% *} - - # Check that the container can be found. - storage exists -c $container - - # Check that the container's user data directory is somewhere under the root. - run storage --debug=false get-container-dir $container - [ "$status" -eq 0 ] - [ "$output" != "" ] - dir=${output%% *} - touch "$dir"/dirfile - echo "$dir"/dirfile | grep -q ^"${TESTDIR}/root/" - - # Check that the container's user run data directory is somewhere under the run root. - run storage --debug=false get-container-run-dir $container - [ "$status" -eq 0 ] - [ "$output" != "" ] - rundir=${output%% *} - touch "$rundir"/rundirfile - echo "$rundir"/rundirfile | grep -q ^"${TESTDIR}/runroot/" -} diff --git a/vendor/github.com/containers/storage/tests/container.bats b/vendor/github.com/containers/storage/tests/container.bats deleted file mode 100644 index b123807ef80f..000000000000 --- a/vendor/github.com/containers/storage/tests/container.bats +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "container" { - # Create and populate three interesting layers. - populate - - # Create an image using to top layer. 
- run storage --debug=false create-image $upperlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Create a container using the image. - name=wonderful-container - run storage --debug=false create-container --name $name $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - container=${lines[0]} - - # Add a couple of big data items. - createrandom ${TESTDIR}/random1 - createrandom ${TESTDIR}/random2 - storage set-container-data -f ${TESTDIR}/random1 $container random1 - storage set-container-data -f ${TESTDIR}/random2 $container random2 - - # Get information about the container, and make sure the ID, name, and data names were preserved. - run storage container $container - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "ID: $container" ]] - [[ "$output" =~ "Name: $name" ]] - [[ "$output" =~ "Data: random1" ]] - [[ "$output" =~ "Data: random2" ]] -} diff --git a/vendor/github.com/containers/storage/tests/create-container.bats b/vendor/github.com/containers/storage/tests/create-container.bats deleted file mode 100644 index 2f7ad40b0a47..000000000000 --- a/vendor/github.com/containers/storage/tests/create-container.bats +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "create-container" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Create a container based on that image. - run storage --debug=false create-container $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - firstcontainer=${output%% *} - - # Check that the container can be found. - storage exists -c $firstcontainer - - # Create another container based on the same image. - run storage --debug=false create-container $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - secondcontainer=${output%% *} - - # Check that *that* container can be found. - storage exists -c $secondcontainer - - # Check that a list of containers lists both of them. - run storage --debug=false containers - echo :"$output": - [ "$status" -eq 0 ] - [ "${#lines[*]}" -eq 2 ] - [ "${lines[0]}" != "${lines[1]}" ] - [ "${lines[0]}" = "$firstcontainer" ] || [ "${lines[0]}" = "$secondcontainer" ] - [ "${lines[1]}" = "$firstcontainer" ] || [ "${lines[1]}" = "$secondcontainer" ] -} diff --git a/vendor/github.com/containers/storage/tests/create-image.bats b/vendor/github.com/containers/storage/tests/create-image.bats deleted file mode 100644 index f0b4546057af..000000000000 --- a/vendor/github.com/containers/storage/tests/create-image.bats +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "create-image" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - firstimage=${output%% *} - - # Check that the image can be accessed. - storage exists -i $firstimage - - # Create another image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - secondimage=${output%% *} - - # Check that *that* image can be accessed. - storage exists -i $secondimage - - # Check that "images" lists the both of the images. 
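These create-and-list tests lean on two entry points visible in the store.go hunks above: Exists, which probes every layer, image, and container store in turn, and Lookup, which resolves a name to an ID and returns ErrLayerUnknown when nothing matches. Roughly, in API terms (identifiers are placeholders; s is a store opened elsewhere):

    if !s.Exists(firstimage) {
    	log.Fatalf("image %s not found", firstimage)
    }
    id, err := s.Lookup("wonderful-image") // name -> ID
    if err != nil {
    	log.Fatal(err) // ErrLayerUnknown when the name is unassigned
    }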
- run storage --debug=false images - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 2 ] - [ "${lines[0]}" != "${lines[1]}" ] - [ "${lines[0]}" = "$firstimage" ] || [ "${lines[0]}" = "$secondimage" ] - [ "${lines[1]}" = "$firstimage" ] || [ "${lines[1]}" = "$secondimage" ] -} diff --git a/vendor/github.com/containers/storage/tests/create-layer.bats b/vendor/github.com/containers/storage/tests/create-layer.bats deleted file mode 100644 index 966dab1f969a..000000000000 --- a/vendor/github.com/containers/storage/tests/create-layer.bats +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "create-layer" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowerlayer="$output" - # Mount the layer. - run storage --debug=false mount $lowerlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowermount="$output" - # Put a file in the layer. - createrandom "$lowermount"/layer1file1 - - # Create a second layer based on the first one. - run storage --debug=false create-layer "$lowerlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - midlayer="$output" - # Mount that layer, too. - run storage --debug=false mount $midlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - midmount="$output" - # Check that the file from the first layer is there. - test -s "$midmount"/layer1file1 - # Check that we can remove it... - rm -f -v "$midmount"/layer1file1 - # ... and that doing so doesn't affect the first layer. - test -s "$lowermount"/layer1file1 - # Create a new file in this layer. - createrandom "$midmount"/layer2file1 - # Unmount this layer. - storage unmount $midlayer - # Unmount the first layer. - storage unmount $lowerlayer - - # Create a third layer based on the second one. - run storage --debug=false create-layer "$midlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - upperlayer="$output" - # Mount this layer. - run storage --debug=false mount $upperlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - uppermount="$output" - # Check that the file we removed from the second layer is still gone. - run test -s "$uppermount"/layer1file1 - [ "$status" -ne 0 ] - # Check that the file we added to the second layer is still there. - test -s "$uppermount"/layer2file1 - # Unmount the third layer. - storage unmount $upperlayer - - # Get a list of the layers, and make sure all three, and no others, are listed. - run storage --debug=false layers - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 3 ] - [ "${lines[0]}" != "${lines[1]}" ] - [ "${lines[1]}" != "${lines[2]}" ] - [ "${lines[2]}" != "${lines[0]}" ] - [ "${lines[0]}" = "$lowerlayer" ] || [ "${lines[0]}" = "$midlayer" ] || [ "${lines[0]}" = "$upperlayer" ] - [ "${lines[1]}" = "$lowerlayer" ] || [ "${lines[1]}" = "$midlayer" ] || [ "${lines[1]}" = "$upperlayer" ] - [ "${lines[2]}" = "$lowerlayer" ] || [ "${lines[2]}" = "$midlayer" ] || [ "${lines[2]}" = "$upperlayer" ] -} diff --git a/vendor/github.com/containers/storage/tests/delete-container.bats b/vendor/github.com/containers/storage/tests/delete-container.bats deleted file mode 100644 index 6e092878ecfd..000000000000 --- a/vendor/github.com/containers/storage/tests/delete-container.bats +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "delete-container" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. 
- run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Create an image using that layer. - run storage --debug=false create-container $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - container=${output%% *} - - # Check that the container can be found. - storage exists -c $container - - # Use delete-container to delete it. - storage delete-container $container - - # Check that the container is gone. - run storage exists -c $container - [ "$status" -ne 0 ] -} diff --git a/vendor/github.com/containers/storage/tests/delete-image.bats b/vendor/github.com/containers/storage/tests/delete-image.bats deleted file mode 100644 index 99f6d1023017..000000000000 --- a/vendor/github.com/containers/storage/tests/delete-image.bats +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "delete-image" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Check that the image can be found. - storage exists -i $image - - # Use delete-image to delete it. - storage delete-image $image - - # Check that the image is gone. - run storage exists -i $image - [ "$status" -ne 0 ] -} diff --git a/vendor/github.com/containers/storage/tests/delete-layer.bats b/vendor/github.com/containers/storage/tests/delete-layer.bats deleted file mode 100644 index 119c0a16602c..000000000000 --- a/vendor/github.com/containers/storage/tests/delete-layer.bats +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "delete-layer" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowerlayer="$output" - # Mount the layer. - run storage --debug=false mount $lowerlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowermount="$output" - # Create a random file in the layer. - createrandom "$lowermount"/layer1file1 - # Unmount the layer. - storage unmount $lowerlayer - - # Create a second layer based on the first one. - run storage --debug=false create-layer "$lowerlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - midlayer="$output" - # Mount the second layer. - run storage --debug=false mount $midlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - midmount="$output" - # Make sure the file from the first layer is present in this layer, then remove it. - test -s "$midmount"/layer1file1 - rm -f -v "$midmount"/layer1file1 - # Create a new file in this layer. - createrandom "$midmount"/layer2file1 - # Unmount the second layer. - storage unmount $midlayer - - # Create a third layer based on the second one. - run storage --debug=false create-layer "$midlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - upperlayer="$output" - # Mount the third layer. - run storage --debug=false mount $upperlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - uppermount="$output" - # Make sure the file from the second layer is present in this layer, - # and that the one from the first didn't come back somehow.. - test -s "$uppermount"/layer2file1 - run test -s "$uppermount"/layer1file1 - [ "$status" -ne 0 ] - # Unmount the third layer. - storage unmount $upperlayer - - # Try to delete the first layer, which should fail because it has children. 
- run storage delete-layer $lowerlayer - [ "$status" -ne 0 ] - # Try to delete the second layer, which should fail because it has children. - run storage delete-layer $midlayer - [ "$status" -ne 0 ] - # Try to delete the third layer, which should succeed because it has no children. - storage delete-layer $upperlayer - # Try to delete the second again, and it should succeed because that child is gone. - storage delete-layer $midlayer - # Try to delete the first again, and it should succeed because that child is gone. - storage delete-layer $lowerlayer -} diff --git a/vendor/github.com/containers/storage/tests/delete.bats b/vendor/github.com/containers/storage/tests/delete.bats deleted file mode 100644 index 5dc79c97ecd7..000000000000 --- a/vendor/github.com/containers/storage/tests/delete.bats +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "delete" { - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Create a container based on that image. - run storage --debug=false create-container $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - container=${output%% *} - - # Check that the container can be found, and delete it using the general delete command. - storage exists -c $container - storage delete $container - - # Check that the container is gone. - run storage exists -c $container - [ "$status" -ne 0 ] - - # Check that the image can be found, and delete it using the general delete command. - storage exists -i $image - storage delete $image - - # Check that the image is gone. - run storage exists -i $image - [ "$status" -ne 0 ] - - # Check that the layer can be found, and delete it using the general delete command. - storage exists -l $layer - storage delete $layer - - # Check that the layer is gone. - run storage exists -l $layer - [ "$status" -ne 0 ] -} diff --git a/vendor/github.com/containers/storage/tests/diff.bats b/vendor/github.com/containers/storage/tests/diff.bats deleted file mode 100644 index 879752ef01e6..000000000000 --- a/vendor/github.com/containers/storage/tests/diff.bats +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "diff" { - # The checkdiffs function needs "tar". - if test -z "$(which tar 2> /dev/null)" ; then - skip "need tar" - fi - - # Create and populate three interesting layers. - populate - - # Mount the layers. - run storage --debug=false mount "$lowerlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowermount="$output" - run storage --debug=false mount "$midlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - midmount="$output" - run storage --debug=false mount "$upperlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - uppermount="$output" - - # Check the "diff" output. - checkdiffs - - # Unmount the layers. - storage unmount $lowerlayer - storage unmount $midlayer - storage unmount $upperlayer - - # Now check the "diff" again. - checkdiffs -} diff --git a/vendor/github.com/containers/storage/tests/diffsize.bats b/vendor/github.com/containers/storage/tests/diffsize.bats deleted file mode 100644 index d1b35ff23d05..000000000000 --- a/vendor/github.com/containers/storage/tests/diffsize.bats +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "diffsize" { - # Create and populate three interesting layers. - populate - - # Mount the layers. 
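The diffsize invocations that follow drive store.DiffSize, whose contract the hunks above spell out: the diff's size on success, or -1 together with ErrLayerUnknown when no store, writable or read-only, knows the layer. Sketch (that an empty "from" argument means "diff against the parent layer" is an assumption about the CLI wiring, not something this diff shows):

    size, err := s.DiffSize("", layerID)
    if err != nil {
    	return err // size is -1 here
    }
    fmt.Printf("diff size: %d bytes\n", size)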
- run storage --debug=false diffsize "$lowerlayer" - [ "$status" -eq 0 ] - echo size:"$output": - [ "$output" -ne 0 ] - run storage --debug=false diffsize "$midlayer" - [ "$status" -eq 0 ] - echo size:"$output": - [ "$output" -ne 0 ] - run storage --debug=false diffsize "$upperlayer" - [ "$status" -eq 0 ] - echo size:"$output": - [ "$output" -ne 0 ] -} diff --git a/vendor/github.com/containers/storage/tests/helpers.bash b/vendor/github.com/containers/storage/tests/helpers.bash index 32f037577d1c..7f54a28c3997 100755 --- a/vendor/github.com/containers/storage/tests/helpers.bash +++ b/vendor/github.com/containers/storage/tests/helpers.bash @@ -2,287 +2,19 @@ STORAGE_BINARY=${STORAGE_BINARY:-$(dirname ${BASH_SOURCE})/../containers-storage} TESTSDIR=${TESTSDIR:-$(dirname ${BASH_SOURCE})} -STORAGE_DRIVER=${STORAGE_DRIVER:-vfs} -STORAGE_OPTION=${STORAGE_OPTION:-} -PATH=$(dirname ${BASH_SOURCE})/..:${PATH} -# Create a unique root directory and a runroot directory. function setup() { - suffix=$(dd if=/dev/urandom bs=12 count=1 status=none | base64 | tr +/ABCDEFGHIJKLMNOPQRSTUVWXYZ _.abcdefghijklmnopqrstuvwxyz) + suffix=$(dd if=/dev/urandom bs=12 count=1 status=none | base64 | tr +/ _.) TESTDIR=${BATS_TMPDIR}/tmp.${suffix} rm -fr ${TESTDIR} mkdir -p ${TESTDIR}/{root,runroot} + REPO=${TESTDIR}/root } -# Delete the unique root directory and a runroot directory. function teardown() { - storage wipe - storage shutdown rm -fr ${TESTDIR} } -# Create a file "$1" with random contents of length $2, or 256. -function createrandom() { - dd if=/dev/urandom bs=1 count=${2:-256} of=${1:-${BATS_TMPDIR}/randomfile} status=none -} - -# Run the CLI with the specified options. function storage() { - ${STORAGE_BINARY} --debug --graph ${TESTDIR}/root --run ${TESTDIR}/runroot --storage-driver ${STORAGE_DRIVER} ${STORAGE_OPTION:+--storage-opt=${STORAGE_OPTION}} "$@" -} - -# Run the CLI with the specified options, and sort its output lines. -function storagewithsorting() { - storage "$@" | LC_ALL=C sort -} - -# Run the CLI with the specified options, and sort its output lines using the second field. -function storagewithsorting2() { - storage "$@" | LC_ALL=C sort -k2 -} - -# Create a few layers with files and directories added and removed at each -# layer. Their IDs are set to $lowerlayer, $midlayer, and $upperlayer. -populate() { - # Create a base layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowerlayer="$output" - # Mount the layer. - run storage --debug=false mount $lowerlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - local lowermount="$output" - # Create three files, and nine directories: three empty, three with subdirectories, three with files. - createrandom "$lowermount"/layer1file1 - createrandom "$lowermount"/layer1file2 - createrandom "$lowermount"/layer1file3 - mkdir "$lowermount"/layerdir1 - mkdir "$lowermount"/layerdir2 - mkdir "$lowermount"/layerdir3 - mkdir "$lowermount"/layerdir4 - mkdir "$lowermount"/layerdir4/layer1subdir - mkdir "$lowermount"/layerdir5 - mkdir "$lowermount"/layerdir5/layer1subdir - mkdir "$lowermount"/layerdir6 - mkdir "$lowermount"/layerdir6/layer1subdir - mkdir "$lowermount"/layerdir7 - createrandom "$lowermount"/layerdir7/layer1file4 - mkdir "$lowermount"/layerdir8 - createrandom "$lowermount"/layerdir8/layer1file5 - mkdir "$lowermount"/layerdir9 - createrandom "$lowermount"/layerdir9/layer1file6 - # Unmount the layer. - storage unmount $lowerlayer - - # Create a second layer based on the first. 
- run storage --debug=false create-layer "$lowerlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - midlayer="$output" - # Mount the second layer. - run storage --debug=false mount $midlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - local midmount="$output" - # Check that the files and directories from the first layer are present. - test -s "$midmount"/layer1file1 - test -s "$midmount"/layer1file2 - test -s "$midmount"/layer1file3 - test -d "$midmount"/layerdir1 - test -d "$midmount"/layerdir2 - test -d "$midmount"/layerdir3 - test -d "$midmount"/layerdir4 - test -d "$midmount"/layerdir4/layer1subdir - test -d "$midmount"/layerdir5 - test -d "$midmount"/layerdir5/layer1subdir - test -d "$midmount"/layerdir6 - test -d "$midmount"/layerdir6/layer1subdir - test -d "$midmount"/layerdir7 - test -s "$midmount"/layerdir7/layer1file4 - test -d "$midmount"/layerdir8 - test -s "$midmount"/layerdir8/layer1file5 - test -d "$midmount"/layerdir9 - test -s "$midmount"/layerdir9/layer1file6 - # Now remove some of those files and directories. - rm "$midmount"/layer1file1 - rm "$midmount"/layer1file2 - rmdir "$midmount"/layerdir1 - rmdir "$midmount"/layerdir2 - rmdir "$midmount"/layerdir4/layer1subdir - rmdir "$midmount"/layerdir4 - rmdir "$midmount"/layerdir5/layer1subdir - rmdir "$midmount"/layerdir5 - rm "$midmount"/layerdir7/layer1file4 - rmdir "$midmount"/layerdir7 - rm "$midmount"/layerdir8/layer1file5 - rmdir "$midmount"/layerdir8 - # Add a couple of new files and directories. - createrandom "$midmount"/layer2file1 - mkdir "$midmount"/layerdir10 - mkdir "$midmount"/layerdir11 - mkdir "$midmount"/layerdir11/layer2subdir - mkdir "$midmount"/layerdir12 - createrandom "$midmount"/layerdir12/layer2file2 - # Unmount the layer. - storage unmount $midlayer - - # Create a third layer based on the second. - run storage --debug=false create-layer "$midlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - upperlayer="$output" - # Mount the third layer. - run storage --debug=false mount $upperlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - local uppermount="$output" - # Check that contents of the second layer are present. - test -s "$uppermount"/layer1file3 - test -d "$uppermount"/layerdir3 - test -d "$uppermount"/layerdir6 - test -d "$uppermount"/layerdir6/layer1subdir - test -d "$uppermount"/layerdir9 - test -s "$uppermount"/layerdir9/layer1file6 - test -s "$uppermount"/layer2file1 - test -d "$uppermount"/layerdir10 - test -d "$uppermount"/layerdir11 - test -d "$uppermount"/layerdir11/layer2subdir - test -d "$uppermount"/layerdir12 - test -s "$uppermount"/layerdir12/layer2file2 - # Re-add some contents for this layer that were removed earlier. - createrandom "$uppermount"/layerfile1 - mkdir "$uppermount"/layerdir1 - mkdir "$uppermount"/layerdir4 - mkdir "$uppermount"/layerdir4/layer1subdir - mkdir "$uppermount"/layerdir7 - createrandom "$uppermount"/layerdir7/layer1file4 - # Unmount the layer. - storage unmount $upperlayer -} - -# Check that the changes list for layers created by populate() correspond to -# what naive diff methods would generate. -checkchanges() { - # The first layer should all be additions. 
- storage changes $lowerlayer - run storagewithsorting2 --debug=false changes $lowerlayer - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 18 ] - [ "${lines[0]}" = 'Add "/layer1file1"' ] - [ "${lines[1]}" = 'Add "/layer1file2"' ] - [ "${lines[2]}" = 'Add "/layer1file3"' ] - [ "${lines[3]}" = 'Add "/layerdir1"' ] - [ "${lines[4]}" = 'Add "/layerdir2"' ] - [ "${lines[5]}" = 'Add "/layerdir3"' ] - [ "${lines[6]}" = 'Add "/layerdir4"' ] - [ "${lines[7]}" = 'Add "/layerdir4/layer1subdir"' ] - [ "${lines[8]}" = 'Add "/layerdir5"' ] - [ "${lines[9]}" = 'Add "/layerdir5/layer1subdir"' ] - [ "${lines[10]}" = 'Add "/layerdir6"' ] - [ "${lines[11]}" = 'Add "/layerdir6/layer1subdir"' ] - [ "${lines[12]}" = 'Add "/layerdir7"' ] - [ "${lines[13]}" = 'Add "/layerdir7/layer1file4"' ] - [ "${lines[14]}" = 'Add "/layerdir8"' ] - [ "${lines[15]}" = 'Add "/layerdir8/layer1file5"' ] - [ "${lines[16]}" = 'Add "/layerdir9"' ] - [ "${lines[17]}" = 'Add "/layerdir9/layer1file6"' ] - # Check the second layer. - storage changes $midlayer - run storagewithsorting2 --debug=false changes $midlayer - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 14 ] - [ "${lines[0]}" = 'Delete "/layer1file1"' ] - [ "${lines[1]}" = 'Delete "/layer1file2"' ] - [ "${lines[2]}" = 'Add "/layer2file1"' ] - [ "${lines[3]}" = 'Delete "/layerdir1"' ] - [ "${lines[4]}" = 'Add "/layerdir10"' ] - [ "${lines[5]}" = 'Add "/layerdir11"' ] - [ "${lines[6]}" = 'Add "/layerdir11/layer2subdir"' ] - [ "${lines[7]}" = 'Add "/layerdir12"' ] - [ "${lines[8]}" = 'Add "/layerdir12/layer2file2"' ] - [ "${lines[9]}" = 'Delete "/layerdir2"' ] - [ "${lines[10]}" = 'Delete "/layerdir4"' ] - [ "${lines[11]}" = 'Delete "/layerdir5"' ] - [ "${lines[12]}" = 'Delete "/layerdir7"' ] - [ "${lines[13]}" = 'Delete "/layerdir8"' ] - # Check the third layer. - storage changes $upperlayer - run storagewithsorting2 --debug=false changes $upperlayer - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 6 ] - [ "${lines[0]}" = 'Add "/layerdir1"' ] - [ "${lines[1]}" = 'Add "/layerdir4"' ] - [ "${lines[2]}" = 'Add "/layerdir4/layer1subdir"' ] - [ "${lines[3]}" = 'Add "/layerdir7"' ] - [ "${lines[4]}" = 'Add "/layerdir7/layer1file4"' ] - [ "${lines[5]}" = 'Add "/layerfile1"' ] -} - -# Check that the diff contents for layers created by populate() correspond to -# what naive diff methods would generate. -checkdiffs() { - # The first layer should all be additions. - storage diff -u -f $TESTDIR/lower.tar $lowerlayer - tar tf $TESTDIR/lower.tar > $TESTDIR/lower.txt - run env LC_ALL=C sort $TESTDIR/lower.txt - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 18 ] - [ "${lines[0]}" = 'layer1file1' ] - [ "${lines[1]}" = 'layer1file2' ] - [ "${lines[2]}" = 'layer1file3' ] - [ "${lines[3]}" = 'layerdir1/' ] - [ "${lines[4]}" = 'layerdir2/' ] - [ "${lines[5]}" = 'layerdir3/' ] - [ "${lines[6]}" = 'layerdir4/' ] - [ "${lines[7]}" = 'layerdir4/layer1subdir/' ] - [ "${lines[8]}" = 'layerdir5/' ] - [ "${lines[9]}" = 'layerdir5/layer1subdir/' ] - [ "${lines[10]}" = 'layerdir6/' ] - [ "${lines[11]}" = 'layerdir6/layer1subdir/' ] - [ "${lines[12]}" = 'layerdir7/' ] - [ "${lines[13]}" = 'layerdir7/layer1file4' ] - [ "${lines[14]}" = 'layerdir8/' ] - [ "${lines[15]}" = 'layerdir8/layer1file5' ] - [ "${lines[16]}" = 'layerdir9/' ] - [ "${lines[17]}" = 'layerdir9/layer1file6' ] - # Check the second layer. 
- storage diff -c -f $TESTDIR/middle.tar $midlayer - tar tzf $TESTDIR/middle.tar > $TESTDIR/middle.txt - run env LC_ALL=C sort $TESTDIR/middle.txt - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 14 ] - [ "${lines[0]}" = '.wh.layer1file1' ] - [ "${lines[1]}" = '.wh.layer1file2' ] - [ "${lines[2]}" = '.wh.layerdir1' ] - [ "${lines[3]}" = '.wh.layerdir2' ] - [ "${lines[4]}" = '.wh.layerdir4' ] - [ "${lines[5]}" = '.wh.layerdir5' ] - [ "${lines[6]}" = '.wh.layerdir7' ] - [ "${lines[7]}" = '.wh.layerdir8' ] - [ "${lines[8]}" = 'layer2file1' ] - [ "${lines[9]}" = 'layerdir10/' ] - [ "${lines[10]}" = 'layerdir11/' ] - [ "${lines[11]}" = 'layerdir11/layer2subdir/' ] - [ "${lines[12]}" = 'layerdir12/' ] - [ "${lines[13]}" = 'layerdir12/layer2file2' ] - # Check the third layer. - storage diff -u -f $TESTDIR/upper.tar $upperlayer - tar tf $TESTDIR/upper.tar > $TESTDIR/upper.txt - run env LC_ALL=C sort $TESTDIR/upper.txt - [ "$status" -eq 0 ] - echo :"$output": - [ "${#lines[*]}" -eq 6 ] - [ "${lines[0]}" = 'layerdir1/' ] - [ "${lines[1]}" = 'layerdir4/' ] - [ "${lines[2]}" = 'layerdir4/layer1subdir/' ] - [ "${lines[3]}" = 'layerdir7/' ] - [ "${lines[4]}" = 'layerdir7/layer1file4' ] - [ "${lines[5]}" = 'layerfile1' ] + ${STORAGE_BINARY} --debug --root ${TESTDIR}/root --runroot ${TESTDIR}/runroot "$@" } diff --git a/vendor/github.com/containers/storage/tests/image-by-digest.bats b/vendor/github.com/containers/storage/tests/image-by-digest.bats deleted file mode 100644 index aaf5eb8dd929..000000000000 --- a/vendor/github.com/containers/storage/tests/image-by-digest.bats +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "images-by-digest" { - # Bail if "sha256sum" isn't available. - if test -z "$(which sha256sum 2> /dev/null)" ; then - skip "need sha256sum" - fi - - # Create a couple of random files. - createrandom ${TESTDIR}/random1 - createrandom ${TESTDIR}/random2 - digest1=$(sha256sum ${TESTDIR}/random1) - digest2=$(sha256sum ${TESTDIR}/random2) - - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Create an image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - firstimage=${output%% *} - # Set the first file as the manifest of this image. - run storage --debug=false set-image-data -f ${TESTDIR}/random1 ${firstimage} manifest - - # Create another image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - secondimage=${output%% *} - # Set the first file as the manifest of this image. - run storage --debug=false set-image-data -f ${TESTDIR}/random1 ${secondimage} manifest - - # Create yet another image using that layer. - run storage --debug=false create-image $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - thirdimage=${output%% *} - # Set the second file as the manifest of this image. - run storage --debug=false set-image-data -f ${TESTDIR}/random2 ${thirdimage} manifest - - # Check that "images-by-digest" lists the right images. 
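The middle-layer tar listing just above is the instructive one: deletions travel inside a layer diff as AUFS-style whiteout entries, .wh.<name>, which is why removing /layer1file1 in the second layer surfaces as .wh.layer1file1 in its compressed diff. A sketch of spotting them while reading such a tarball (for a -c diff, wrap the input in a gzip.Reader first):

    import (
    	"archive/tar"
    	"fmt"
    	"io"
    	"path"
    	"strings"
    )

    // listWhiteouts prints the deletion markers in a layer diff stream.
    func listWhiteouts(r io.Reader) error {
    	tr := tar.NewReader(r)
    	for {
    		hdr, err := tr.Next()
    		if err == io.EOF {
    			return nil
    		}
    		if err != nil {
    			return err
    		}
    		if strings.HasPrefix(path.Base(hdr.Name), ".wh.") {
    			fmt.Println(hdr.Name) // marks deletion of the unprefixed path
    		}
    	}
    }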
- run storage --debug=false images-by-digest --quiet sha256:${digest1// *} - echo :"$output": - [ "$status" -eq 0 ] - [ "${#lines[*]}" -eq 2 ] - [ "${lines[0]}" != "${lines[1]}" ] - [ "${lines[0]}" = "$firstimage" ] || [ "${lines[0]}" = "$secondimage" ] - [ "${lines[1]}" = "$firstimage" ] || [ "${lines[1]}" = "$secondimage" ] - - run storage --debug=false images-by-digest --quiet sha256:${digest2// *} - echo :"$output": - [ "$status" -eq 0 ] - [ "${#lines[*]}" -eq 1 ] - [ "${lines[0]}" = "$thirdimage" ] - - run storage --debug=false delete-image ${secondimage} - echo :"$output": - [ "$status" -eq 0 ] - - run storage --debug=false images-by-digest --quiet sha256:${digest1// *} - echo :"$output": - [ "$status" -eq 0 ] - [ "${#lines[*]}" -eq 1 ] - [ "${lines[0]}" = "$firstimage" ] - - run storage --debug=false delete-image ${firstimage} - echo :"$output": - [ "$status" -eq 0 ] - - run storage --debug=false images-by-digest --quiet sha256:${digest1// *} - echo :"$output": - [ "$status" -eq 0 ] - [ "$output" = "" ] -} diff --git a/vendor/github.com/containers/storage/tests/image.bats b/vendor/github.com/containers/storage/tests/image.bats deleted file mode 100644 index 5eec5c1911d5..000000000000 --- a/vendor/github.com/containers/storage/tests/image.bats +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "image" { - # Create and populate three interesting layers. - populate - - # Create an image using to top layer. - name=wonderful-image - run storage --debug=false create-image --name $name $upperlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${lines[0]} - - # Add a couple of big data items. - createrandom ${TESTDIR}/random1 - createrandom ${TESTDIR}/random2 - storage set-image-data -f ${TESTDIR}/random1 $image random1 - storage set-image-data -f ${TESTDIR}/random2 $image random2 - - # Get information about the image, and make sure the ID, name, and data names were preserved. - run storage image $image - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "ID: $image" ]] - [[ "$output" =~ "Name: $name" ]] - [[ "$output" =~ "Data: random1" ]] - [[ "$output" =~ "Data: random2" ]] -} diff --git a/vendor/github.com/containers/storage/tests/import-layer.bats b/vendor/github.com/containers/storage/tests/import-layer.bats deleted file mode 100644 index 139f02807b73..000000000000 --- a/vendor/github.com/containers/storage/tests/import-layer.bats +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "import-layer" { - # The checkdiffs function needs "tar". - if test -z "$(which tar 2> /dev/null)" ; then - skip "need tar" - fi - - # Create and populate three interesting layers. - populate - - # Extract the layers. - storage diff -u -f $TESTDIR/lower.tar $lowerlayer - storage diff -c -f $TESTDIR/middle.tar $midlayer - storage diff -u -f $TESTDIR/upper.tar $upperlayer - - # Delete the layers. - storage delete-layer $upperlayer - storage delete-layer $midlayer - storage delete-layer $lowerlayer - - # Import new layers using the layer diffs. - run storage --debug=false import-layer -f $TESTDIR/lower.tar - [ "$status" -eq 0 ] - [ "$output" != "" ] - lowerlayer="$output" - - run storage --debug=false import-layer -f $TESTDIR/middle.tar "$lowerlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - midlayer="$output" - - run storage --debug=false import-layer -f $TESTDIR/upper.tar "$midlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - upperlayer="$output" - - # The contents of these new layers should match what the old ones had. 
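The import-layer test deleted below is a CLI round trip over two store calls from earlier in this diff: Diff exports a layer as a tar stream, and ApplyDiff, which after this revert takes an archive.Reader rather than an io.Reader, rebuilds a layer from one. Approximately (error handling trimmed; treating archive.Reader as satisfied by any io.Reader is an assumption):

    rc, err := s.Diff("", oldLayer, nil) // export the layer's diff
    if err != nil {
    	return err
    }
    defer rc.Close()
    n, err := s.ApplyDiff(newLayer, rc) // rebuild onto a fresh layer
    if err != nil {
    	return err
    }
    fmt.Printf("applied %d bytes\n", n)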
- checkchanges - checkdiffs -} diff --git a/vendor/github.com/containers/storage/tests/metadata.bats b/vendor/github.com/containers/storage/tests/metadata.bats deleted file mode 100644 index eb2aabd5fd9d..000000000000 --- a/vendor/github.com/containers/storage/tests/metadata.bats +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env bats - -load helpers - -@test "metadata" { - echo danger > $TESTDIR/danger.txt - - # Create a layer. - run storage --debug=false create-layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - layer=$output - - # Make sure the layer's there. - storage exists -l $layer - - # Create an image using the layer and directly-supplied metadata. - run storage --debug=false create-image -m danger $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Make sure that the image is there. - storage exists -i $image - - # Read back the metadata and make sure it's the right value. - run storage --debug=false metadata -q $image - [ "$status" -eq 0 ] - [ "$output" = "danger" ] - - # Change the metadata to a directly-supplied value. - run storage set-metadata -m thunder $image - [ "$status" -eq 0 ] - - # Read back the metadata and make sure it's the new value. - run storage --debug=false metadata -q $image - [ "$status" -eq 0 ] - [ "$output" = "thunder" ] - - # Change the metadata to a value supplied via a file. - storage set-metadata -f $TESTDIR/danger.txt $image - - # Read back the metadata and make sure it's the newer value. - run storage --debug=false metadata -q $image - [ "$status" -eq 0 ] - [ "$output" = "danger" ] - - # Create an image using the layer and metadata read from a file. - run storage --debug=false create-image -f $TESTDIR/danger.txt $layer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # Make sure that the image is there. - storage exists -i $image - - # Read back the metadata and make sure it's the right value. - run storage --debug=false metadata -q $image - [ "$status" -eq 0 ] - [ "$output" = "danger" ] - - # Change the metadata to a directly-supplied value. - storage set-metadata -m thunder $image - - # Read back the metadata and make sure it's the new value. - run storage --debug=false metadata -q $image - [ "$status" -eq 0 ] - [ "$output" = "thunder" ] - - # Change the metadata to a value supplied via a file. - storage set-metadata -f $TESTDIR/danger.txt $image - - # Read back the metadata and make sure it's the newer value. - run storage --debug=false metadata -q $image - [ "$status" -eq 0 ] - [ "$output" = "danger" ] - - # Create a container based on the image and directly-supplied metadata. - run storage --debug=false create-container -m danger $image - [ "$status" -eq 0 ] - [ "$output" != "" ] - container=${output%% *} - - # Make sure the container is there. - storage exists -c $container - - # Read the metadata and make sure it's the right value. - run storage --debug=false metadata -q $container - [ "$status" -eq 0 ] - [ "$output" = "danger" ] - - # Change the metadata to a new value. - storage set-metadata -m thunder $container - - # Read back the new metadata value. - run storage --debug=false metadata -q $container - [ "$status" -eq 0 ] - [ "$output" = "thunder" ] - - # Change the metadata to a new value read from a file. - storage set-metadata -f $TESTDIR/danger.txt $container - - # Read back the newer metadata value. - run storage --debug=false metadata -q $container - [ "$status" -eq 0 ] - [ "$output" = "danger" ] - - # Create a container based on the image and metadata read from a file. 
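The metadata test above alternates values supplied inline (-m) with values read from a file (-f); both end up in the same string-valued metadata slot on a layer, image, or container. In API terms this presumably maps onto a Metadata/SetMetadata pair on the store (the signatures are assumed here, they are not shown in this diff):

    if err := s.SetMetadata(container, "thunder"); err != nil {
    	return err
    }
    md, err := s.Metadata(container)
    if err != nil {
    	return err
    }
    fmt.Println(md) // "thunder"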
- run storage --debug=false create-container -f $TESTDIR/danger.txt $image
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- container=${output%% *}
-
- # Make sure the container is there.
- storage exists -c $container
-
- # Read the metadata and make sure it's the right value.
- run storage --debug=false metadata -q $container
- [ "$status" -eq 0 ]
- [ "$output" = "danger" ]
-
- # Change the metadata to a new value.
- storage set-metadata -m thunder $container
-
- # Read back the new metadata value.
- run storage --debug=false metadata -q $container
- [ "$status" -eq 0 ]
- [ "$output" = "thunder" ]
-
- # Change the metadata to a new value read from a file.
- storage set-metadata -f $TESTDIR/danger.txt $container
-
- # Read back the newer metadata value.
- run storage --debug=false metadata -q $container
- [ "$status" -eq 0 ]
- [ "$output" = "danger" ]
-}
diff --git a/vendor/github.com/containers/storage/tests/names.bats b/vendor/github.com/containers/storage/tests/names.bats
deleted file mode 100644
index a1042bd3a53f..000000000000
--- a/vendor/github.com/containers/storage/tests/names.bats
+++ /dev/null
@@ -1,615 +0,0 @@
-#!/usr/bin/env bats
-
-load helpers
-
-# Helper function to scan the list of names of an item for a particular value.
-check-for-name() {
- name="$1"
- shift
- run storage --debug=false get-names "$@"
- [ "$status" -eq 0 ]
- [[ "$output" =~ "$name" ]]
-}
-
-@test "names at creation: layers" {
- # Create a layer with no name.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- lowerlayer=$output
-
- # Verify that the layer exists and can be found by ID.
- run storage exists -l $lowerlayer
- [ "$status" -eq 0 ]
- # Verify that these three names don't appear to be assigned.
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -ne 0 ]
- run storage exists -l barlayer
- [ "$status" -ne 0 ]
-
- # Create a new layer and give it two of the above-mentioned names.
- run storage --debug=false create-layer -n foolayer -n barlayer $lowerlayer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- upperlayer=${output%% *}
-
- # Verify that the new layer exists and can be found by its ID.
- run storage exists -l $upperlayer
- [ "$status" -eq 0 ]
- # Verify that two of the names we checked earlier are now assigned, and to the new layer.
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -eq 0 ]
- run storage exists -l barlayer
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $upperlayer
- [ "$status" -ne 0 ]
- run check-for-name foolayer $upperlayer
- [ "$status" -eq 0 ]
- run check-for-name barlayer $upperlayer
- [ "$status" -eq 0 ]
-}
-
-@test "add-names: layers" {
- # Create a layer with no name.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- lowerlayer=$output
-
- # Verify that we can find the layer by its ID.
- run storage exists -l $lowerlayer
- [ "$status" -eq 0 ]
-
- # Check that these three names are not currently assigned.
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -ne 0 ]
- run storage exists -l barlayer
- [ "$status" -ne 0 ]
-
- # Create a new layer with names.
- run storage --debug=false create-layer -n foolayer -n barlayer $lowerlayer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- upperlayer=${output%% *}
-
- # Add names to the new layer.
- run storage add-names -n newlayer -n otherlayer $upperlayer
- [ "$status" -eq 0 ]
-
- # Verify that we can find the new layer by its ID.
- run storage exists -l $upperlayer
- [ "$status" -eq 0 ]
- # Verify that the name we didn't assign is still unassigned, and that the two names we
- # started with, along with the two we added, are all assigned to the new layer.
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -eq 0 ]
- run storage exists -l barlayer
- [ "$status" -eq 0 ]
- run storage exists -l newlayer
- [ "$status" -eq 0 ]
- run storage exists -l otherlayer
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $upperlayer
- [ "$status" -ne 0 ]
- run check-for-name foolayer $upperlayer
- [ "$status" -eq 0 ]
- run check-for-name barlayer $upperlayer
- [ "$status" -eq 0 ]
- run check-for-name newlayer $upperlayer
- [ "$status" -eq 0 ]
- run check-for-name otherlayer $upperlayer
- [ "$status" -eq 0 ]
-}
-
-@test "set-names: layers" {
- # Create a layer with no name.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- lowerlayer=$output
-
- # Verify that we can find the layer by its ID.
- run storage exists -l $lowerlayer
- [ "$status" -eq 0 ]
-
- # Check that these three names are not currently assigned.
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -ne 0 ]
- run storage exists -l barlayer
- [ "$status" -ne 0 ]
-
- # Create a new layer with two names.
- run storage --debug=false create-layer -n foolayer -n barlayer $lowerlayer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- upperlayer=${output%% *}
-
- # Assign a list of two names to the layer, which should remove its other names.
- run storage set-names -n newlayer -n otherlayer $upperlayer
- [ "$status" -eq 0 ]
-
- # Check that the old names are no longer assigned at all, and that the new names are assigned to it.
- run storage exists -l $upperlayer
- [ "$status" -eq 0 ]
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -ne 0 ]
- run storage exists -l barlayer
- [ "$status" -ne 0 ]
- run storage exists -l newlayer
- [ "$status" -eq 0 ]
- run storage exists -l otherlayer
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $upperlayer
- [ "$status" -ne 0 ]
- run check-for-name foolayer $upperlayer
- [ "$status" -ne 0 ]
- run check-for-name barlayer $upperlayer
- [ "$status" -ne 0 ]
- run check-for-name newlayer $upperlayer
- [ "$status" -eq 0 ]
- run check-for-name otherlayer $upperlayer
- [ "$status" -eq 0 ]
-}
-
-@test "move-names: layers" {
- # Create a layer with two names.
- run storage --debug=false create-layer -n foolayer -n barlayer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- lowerlayer=${output%% *}
-
- # Verify that we can find the layer by its ID.
- run storage exists -l $lowerlayer
- [ "$status" -eq 0 ]
-
- # Check that the bogus name is not assigned, but the two names we gave the layer are.
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -eq 0 ]
- run storage exists -l barlayer
- [ "$status" -eq 0 ]
-
- # Create another layer with no names.
- run storage --debug=false create-layer $lowerlayer
- [ "$status" -eq 0 ]
- upperlayer=${output%% *}
-
- # Set names on that new layer, which should remove the names from the old one.
- run storage set-names -n foolayer -n barlayer $upperlayer
- [ "$status" -eq 0 ]
-
- # Verify that we can find the layer by its ID, and that the two names exist.
- run storage exists -l $upperlayer
- [ "$status" -eq 0 ]
- run storage exists -l no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -l foolayer
- [ "$status" -eq 0 ]
- run storage exists -l barlayer
- [ "$status" -eq 0 ]
-
- # Check that the names are attached to the new layer and not the old one.
- run check-for-name foolayer $lowerlayer
- [ "$status" -ne 0 ]
- run check-for-name barlayer $lowerlayer
- [ "$status" -ne 0 ]
- run check-for-name foolayer $upperlayer
- [ "$status" -eq 0 ]
- run check-for-name barlayer $upperlayer
- [ "$status" -eq 0 ]
-}
-
-@test "names at creation: images" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer, giving it two names.
- run storage --debug=false create-image -n fooimage -n barimage $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Check that we can find that image by ID and by its names.
- run storage exists -i $image
- [ "$status" -eq 0 ]
- run storage exists -i no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -i fooimage
- [ "$status" -eq 0 ]
- run storage exists -i barimage
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $image
- [ "$status" -ne 0 ]
- run check-for-name fooimage $image
- [ "$status" -eq 0 ]
- run check-for-name barimage $image
- [ "$status" -eq 0 ]
-}
-
-@test "add-names: images" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer, giving it two names.
- run storage --debug=false create-image -n fooimage -n barimage $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Check that we can find that image by ID and by its names.
- run storage exists -i $image
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $image
- [ "$status" -ne 0 ]
- run check-for-name fooimage $image
- [ "$status" -eq 0 ]
- run check-for-name barimage $image
- [ "$status" -eq 0 ]
-
- # Add two names to the image.
- run storage add-names -n newimage -n otherimage $image
- [ "$status" -eq 0 ]
-
- # Check that all of the names are now assigned.
- run storage exists -i no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -i fooimage
- [ "$status" -eq 0 ]
- run storage exists -i barimage
- [ "$status" -eq 0 ]
- run storage exists -i newimage
- [ "$status" -eq 0 ]
- run storage exists -i otherimage
- [ "$status" -eq 0 ]
-
- # Check that all of the names are now assigned to this image.
- run check-for-name no-such-thing-as-this-name $image
- [ "$status" -ne 0 ]
- run check-for-name fooimage $image
- [ "$status" -eq 0 ]
- run check-for-name barimage $image
- [ "$status" -eq 0 ]
- run check-for-name newimage $image
- [ "$status" -eq 0 ]
- run check-for-name otherimage $image
- [ "$status" -eq 0 ]
-}
-
-@test "set-names: images" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer, giving it two names.
- run storage --debug=false create-image -n fooimage -n barimage $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Check that we can find that image by ID and by its names.
- run storage exists -i $image
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $image
- [ "$status" -ne 0 ]
- run check-for-name fooimage $image
- [ "$status" -eq 0 ]
- run check-for-name barimage $image
- [ "$status" -eq 0 ]
-
- # Set the names for the image to two new names.
- run storage set-names -n newimage -n otherimage $image
- [ "$status" -eq 0 ]
-
- # Check that the two new names are the only ones assigned.
- run storage exists -i no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -i fooimage
- [ "$status" -ne 0 ]
- run storage exists -i barimage
- [ "$status" -ne 0 ]
- run storage exists -i newimage
- [ "$status" -eq 0 ]
- run storage exists -i otherimage
- [ "$status" -eq 0 ]
-
- # Check that the two new names are the only ones on this image.
- run check-for-name no-such-thing-as-this-name $image
- [ "$status" -ne 0 ]
- run check-for-name fooimage $image
- [ "$status" -ne 0 ]
- run check-for-name barimage $image
- [ "$status" -ne 0 ]
- run check-for-name newimage $image
- [ "$status" -eq 0 ]
- run check-for-name otherimage $image
- [ "$status" -eq 0 ]
-}
-
-@test "move-names: images" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer, giving it two names.
- run storage --debug=false create-image -n fooimage -n barimage $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- firstimage=${output%% *}
-
- # Create another image with no names.
- run storage --debug=false create-image $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Check that we can find the first image by ID and by its names.
- run storage exists -i $firstimage
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $firstimage
- [ "$status" -ne 0 ]
- run check-for-name fooimage $firstimage
- [ "$status" -eq 0 ]
- run check-for-name barimage $firstimage
- [ "$status" -eq 0 ]
-
- # Set a name list on the new image that includes the names of the old one.
- run storage set-names -n fooimage -n barimage -n newimage -n otherimage $image
- [ "$status" -eq 0 ]
-
- # Check that all of the names are assigned.
- run storage exists -i no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -i fooimage
- [ "$status" -eq 0 ]
- run storage exists -i barimage
- [ "$status" -eq 0 ]
- run storage exists -i newimage
- [ "$status" -eq 0 ]
- run storage exists -i otherimage
- [ "$status" -eq 0 ]
-
- # Check that all of the names are assigned to the new image.
- run check-for-name no-such-thing-as-this-name $image
- [ "$status" -ne 0 ]
- run check-for-name fooimage $image
- [ "$status" -eq 0 ]
- run check-for-name barimage $image
- [ "$status" -eq 0 ]
- run check-for-name newimage $image
- [ "$status" -eq 0 ]
- run check-for-name otherimage $image
- [ "$status" -eq 0 ]
-}
-
-@test "names at creation: containers" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer.
- run storage --debug=false create-image $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Create a container with two names, based on that image.
- run storage --debug=false create-container -n foocontainer -n barcontainer $image
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- container=${output%% *}
-
- # Check that we can find the container using either its ID or names.
- run storage exists -c $container
- [ "$status" -eq 0 ]
- run storage exists -c no-such-thing-as-this-name
- [ "$status" -ne 0 ]
- run storage exists -c foocontainer
- [ "$status" -eq 0 ]
- run storage exists -c barcontainer
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $container
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $container
- [ "$status" -eq 0 ]
- run check-for-name barcontainer $container
- [ "$status" -eq 0 ]
-}
-
-@test "add-names: containers" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer.
- run storage --debug=false create-image -n fooimage -n barimage $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Create a container with two names, based on that image.
- run storage --debug=false create-container -n foocontainer -n barcontainer $image
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- container=${output%% *}
-
- # Check that we can find the container using either its ID or names.
- run storage exists -c $container
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $container
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $container
- [ "$status" -eq 0 ]
- run check-for-name barcontainer $container
- [ "$status" -eq 0 ]
-
- # Add two names to the container.
- run storage add-names -n newcontainer -n othercontainer $container
- [ "$status" -eq 0 ]
-
- # Verify that all of those names are assigned to the container.
- run storage exists -c $container
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $container
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $container
- [ "$status" -eq 0 ]
- run check-for-name barcontainer $container
- [ "$status" -eq 0 ]
- run check-for-name newcontainer $container
- [ "$status" -eq 0 ]
- run check-for-name othercontainer $container
- [ "$status" -eq 0 ]
-}
-
-@test "set-names: containers" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer.
- run storage --debug=false create-image -n fooimage -n barimage $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Create a container with two names, based on that image.
- run storage --debug=false create-container -n foocontainer -n barcontainer $image
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- container=${output%% *}
-
- # Check that we can find the container using either its ID or names.
- run storage exists -c $container
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $container
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $container
- [ "$status" -eq 0 ]
- run check-for-name barcontainer $container
- [ "$status" -eq 0 ]
-
- # Set the list of names for the container to just these two values.
- run storage set-names -n newcontainer -n othercontainer $container
- [ "$status" -eq 0 ]
-
- # Check that these are the only two names attached to the container.
- run storage exists -c $container
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $container
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $container
- [ "$status" -ne 0 ]
- run check-for-name barcontainer $container
- [ "$status" -ne 0 ]
- run check-for-name newcontainer $container
- [ "$status" -eq 0 ]
- run check-for-name othercontainer $container
- [ "$status" -eq 0 ]
-}
-
-@test "move-names: containers" {
- # Create a layer.
- run storage --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- layer=$output
-
- # Create an image that uses that layer.
- run storage --debug=false create-image -n fooimage -n barimage $layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- image=${output%% *}
-
- # Create a container with two names, based on that image.
- run storage --debug=false create-container -n foocontainer -n barcontainer $image
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- firstcontainer=${output%% *}
-
- # Create another container with two different names, based on that image.
- run storage --debug=false create-container -n newcontainer -n othercontainer $image
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- container=${output%% *}
-
- # Check that we can access both containers by ID, and that they have the right names.
- run storage exists -c $firstcontainer
- [ "$status" -eq 0 ]
- run storage exists -c $container
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $firstcontainer
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $firstcontainer
- [ "$status" -eq 0 ]
- run check-for-name barcontainer $firstcontainer
- [ "$status" -eq 0 ]
- run check-for-name newcontainer $firstcontainer
- [ "$status" -ne 0 ]
- run check-for-name othercontainer $firstcontainer
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $container
- [ "$status" -ne 0 ]
- run check-for-name barcontainer $container
- [ "$status" -ne 0 ]
- run check-for-name newcontainer $container
- [ "$status" -eq 0 ]
- run check-for-name othercontainer $container
- [ "$status" -eq 0 ]
-
- # Set the names on the new container to the names we gave the old one.
- run storage set-names -n foocontainer -n barcontainer $container
- [ "$status" -eq 0 ]
-
- # Check that the containers can still be found, and that the names are correctly set.
- run storage exists -c $firstcontainer
- [ "$status" -eq 0 ]
- run storage exists -c $container
- [ "$status" -eq 0 ]
- run check-for-name no-such-thing-as-this-name $container
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $firstcontainer
- [ "$status" -ne 0 ]
- run check-for-name barcontainer $firstcontainer
- [ "$status" -ne 0 ]
- run check-for-name newcontainer $firstcontainer
- [ "$status" -ne 0 ]
- run check-for-name othercontainer $firstcontainer
- [ "$status" -ne 0 ]
- run check-for-name foocontainer $container
- [ "$status" -eq 0 ]
- run check-for-name barcontainer $container
- [ "$status" -eq 0 ]
- run check-for-name newcontainer $container
- [ "$status" -ne 0 ]
- run check-for-name othercontainer $container
- [ "$status" -ne 0 ]
-}
diff --git a/vendor/github.com/containers/storage/tests/status.bats b/vendor/github.com/containers/storage/tests/status.bats
deleted file mode 100644
index 30f7a4b7d489..000000000000
--- a/vendor/github.com/containers/storage/tests/status.bats
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env bats
-
-load helpers
-
-@test "status" {
- run storage --debug=false status
- echo :"$output":
- [ "$status" -eq 0 ]
- # Expect the first line of the output to be the storage root directory location.
- [ "${lines[0]/:*/}" = "Root" ]
- [ "${lines[0]/*: /}" = "${TESTDIR}/root" ]
- # Expect the second line of the output to be the storage runroot directory location.
- [ "${lines[1]/:*/}" = "Run Root" ]
- [ "${lines[1]/*: /}" = "${TESTDIR}/runroot" ]
- # Expect the third line of the output to be "Driver Name: $STORAGE_DRIVER".
- [ "${lines[2]/:*/}" = "Driver Name" ]
- [ "${lines[2]/*: /}" = "$STORAGE_DRIVER" ]
-}
diff --git a/vendor/github.com/containers/storage/tests/stores.bats b/vendor/github.com/containers/storage/tests/stores.bats
deleted file mode 100644
index 97c7f57da221..000000000000
--- a/vendor/github.com/containers/storage/tests/stores.bats
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env bats
-
-load helpers
-
-@test "additional-stores" {
- case "$STORAGE_DRIVER" in
- overlay*|vfs)
- ;;
- *)
- skip "not supported by driver $STORAGE_DRIVER"
- ;;
- esac
- # Initialize a store somewhere that we'll later use as a read-only store.
- storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot layers
- # Skip this test if we can't initialize the driver with the option.
- if ! storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root layers ; then
- skip
- fi
- # Create a layer in what will become the read-only store.
- run storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot --debug=false create-layer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- lowerlayer="$output"
- # Mount the layer in what will become the read-only store.
- run storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot --debug=false mount $lowerlayer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- lowermount="$output"
- # Put a file in the layer in what will become the read-only store.
- createrandom "$lowermount"/layer1file1
-
- # Create a second layer based on the first one in what will become the read-only store.
- run storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot --debug=false create-layer "$lowerlayer"
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- midlayer="$output"
- # Mount that layer, too.
- run storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot --debug=false mount $midlayer
- [ "$status" -eq 0 ]
- [ "$output" != "" ]
- midmount="$output"
- # Check that the file from the first layer is there.
- test -s "$midmount"/layer1file1 - # Check that we can remove it... - rm -f -v "$midmount"/layer1file1 - # ... and that doing so doesn't affect the first layer. - test -s "$lowermount"/layer1file1 - # Create a new file in this layer. - createrandom "$midmount"/layer2file1 - # Unmount this layer. - storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot unmount $midlayer - # Unmount the first layer. - storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot unmount $lowerlayer - - # Create an image using this second layer. - run storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot --debug=false create-image $midlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - image=${output%% *} - - # We no longer need to use the read-only root as a writeable location, so shut it down. - storage --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot shutdown - - # Create a third layer based on the second one. - run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root --debug=false create-layer "$midlayer" - [ "$status" -eq 0 ] - [ "$output" != "" ] - upperlayer="$output" - # Mount this layer. - run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root --debug=false mount $upperlayer - [ "$status" -eq 0 ] - [ "$output" != "" ] - uppermount="$output" - # Check that the file we removed from the second layer is still gone. - run test -s "$uppermount"/layer1file1 - [ "$status" -ne 0 ] - # Check that the file we added to the second layer is still there. - test -s "$uppermount"/layer2file1 - # Unmount the third layer. - storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root unmount $upperlayer - - # Create a container based on the image. - run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root --debug=false create-container "$image" - [ "$status" -eq 0 ] - [ "$output" != "" ] - container="$output" - # Mount this container. - run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root --debug=false mount $container - [ "$status" -eq 0 ] - [ "$output" != "" ] - containermount="$output" - # Check that the file we removed from the second layer is still gone. - run test -s "$containermount"/layer1file1 - [ "$status" -ne 0 ] - # Check that the file we added to the second layer is still there. - test -s "$containermount"/layer2file1 - # Unmount the container. 
- storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root delete-container $container
-}
diff --git a/vendor/github.com/containers/storage/tests/test_drivers.bash b/vendor/github.com/containers/storage/tests/test_drivers.bash
deleted file mode 100755
index 641d11df4dfa..000000000000
--- a/vendor/github.com/containers/storage/tests/test_drivers.bash
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-aufs() {
- modprobe aufs 2> /dev/null
- grep -E -q ' aufs$' /proc/filesystems
-}
-
-btrfs() {
- [ $(stat -f -c %T ${TMPDIR}) = btrfs ]
-}
-
-devicemapper() {
- pkg-config devmapper 2> /dev/null
-}
-
-overlay() {
- modprobe overlay 2> /dev/null
- grep -E -q ' overlay$' /proc/filesystems
-}
-
-zfs() {
- [ "$(stat -f -c %T ${TMPDIR:-/tmp})" = zfs ]
-}
-
-if [ "$STORAGE_DRIVER" = "" ] ; then
- drivers=vfs
- if aufs ; then
- drivers="$drivers aufs"
- fi
- if btrfs; then
- drivers="$drivers btrfs"
- fi
- if devicemapper; then
- drivers="$drivers devicemapper"
- fi
- if overlay; then
- drivers="$drivers overlay"
- fi
- if zfs; then
- drivers="$drivers zfs"
- fi
-else
- drivers="$STORAGE_DRIVER"
-fi
-set -e
-for driver in $drivers ; do
- echo '['STORAGE_DRIVER="$driver"']'
- env STORAGE_DRIVER="$driver" $(dirname ${BASH_SOURCE})/test_runner.bash "$@"
-done
diff --git a/vendor/github.com/containers/storage/tests/test_runner.bash b/vendor/github.com/containers/storage/tests/test_runner.sh
similarity index 92%
rename from vendor/github.com/containers/storage/tests/test_runner.bash
rename to vendor/github.com/containers/storage/tests/test_runner.sh
index ddd736ff603f..868df60e31f8 100755
--- a/vendor/github.com/containers/storage/tests/test_runner.bash
+++ b/vendor/github.com/containers/storage/tests/test_runner.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 set -e
 cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index a30f8feb5b24..81fbbaddc825 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -1,9 +1,11 @@
 github.com/BurntSushi/toml master
 github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
 github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
-github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
+github.com/sirupsen/logrus v1.0.0
 github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
+github.com/go-check/check 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec
 github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
 github.com/opencontainers/go-digest master
@@ -11,11 +13,8 @@ github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
 github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
 github.com/pkg/errors master
-github.com/pmezard/go-difflib v1.0.0
-github.com/sirupsen/logrus v1.0.0
-github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
 github.com/tchap/go-patricia v2.2.6
-github.com/vbatts/tar-split v0.10.2
-golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
-golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
-github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
+github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
+github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
+golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
+golang.org/x/sys d75a52659825e75fff6158388dddc6a5b04f9ba5
diff --git a/vendor/k8s.io/kubernetes/origin.sha b/vendor/k8s.io/kubernetes/origin.sha
new file mode 100644
index 000000000000..5e16030c39b6
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/origin.sha
@@ -0,0 +1 @@
+2f933b0ad87744106b8ec0f6c4a2722c04fecca6