diff --git a/hack/import-restrictions.json b/hack/import-restrictions.json
index 1daa37fd702c..76ec17c924a2 100644
--- a/hack/import-restrictions.json
+++ b/hack/import-restrictions.json
@@ -524,16 +524,15 @@
       "vendor/github.com/davecgh/go-spew/spew",
 
       "github.com/openshift/origin/pkg/authorization/generated",
-      "github.com/openshift/origin/pkg/build/apis/build/v1",
       "github.com/openshift/origin/pkg/build/generated",
-      "github.com/openshift/origin/pkg/build/apis/build/internal_helpers",
-	  "github.com/openshift/origin/pkg/image/apis/image/v1",
+      "github.com/openshift/origin/pkg/image/apis/image/v1",
       "github.com/openshift/origin/pkg/image/generated",
-	  "github.com/openshift/origin/pkg/image/util",
+      "github.com/openshift/origin/pkg/image/util",
       "github.com/openshift/origin/pkg/network/generated",
       "github.com/openshift/origin/pkg/oauth/generated",
       "github.com/openshift/origin/pkg/project/generated",
       "github.com/openshift/origin/pkg/quota/generated",
+      "github.com/openshift/origin/pkg/route/apis/route/v1",
       "github.com/openshift/origin/pkg/route/generated",
       "github.com/openshift/origin/pkg/template/apis/template/v1",
       "github.com/openshift/origin/pkg/template/client/v1",
@@ -561,6 +560,7 @@
       "github.com/openshift/origin/pkg/build/apis/build",
       "github.com/openshift/origin/pkg/build/apis/build/install",
       "github.com/openshift/origin/pkg/build/buildapihelpers",
+      "github.com/openshift/origin/pkg/build/apis/build/internal_helpers",
       "github.com/openshift/origin/pkg/build/client",
       "github.com/openshift/origin/pkg/build/client/v1",
       "github.com/openshift/origin/pkg/build/util",
@@ -587,7 +587,6 @@
       "github.com/openshift/origin/pkg/cmd/util/variable",
       "github.com/openshift/origin/pkg/git",
       "github.com/openshift/origin/pkg/image/apis/image",
-      "github.com/openshift/origin/pkg/image/apis/image/install",
       "github.com/openshift/origin/pkg/image/apis/image/v1/trigger",
       "github.com/openshift/origin/pkg/image/apis/image/docker10",
       "github.com/openshift/origin/pkg/image/apis/image/reference",
@@ -645,7 +644,6 @@
       "vendor/github.com/miekg/dns",
 
       "vendor/k8s.io/kubernetes/pkg/api/legacyscheme",
-      "vendor/k8s.io/kubernetes/pkg/api/ref",
       "vendor/k8s.io/kubernetes/pkg/controller",
       "vendor/k8s.io/kubernetes/pkg/controller/deployment/util",
       "vendor/k8s.io/kubernetes/pkg/credentialprovider",
diff --git a/pkg/api/imagereferencemutators/pods.go b/pkg/api/imagereferencemutators/pods.go
index b69b37ef12eb..71bdd71de5e7 100644
--- a/pkg/api/imagereferencemutators/pods.go
+++ b/pkg/api/imagereferencemutators/pods.go
@@ -3,11 +3,13 @@ package imagereferencemutators
 import (
 	"fmt"
 
-	appsv1beta1 "k8s.io/api/apps/v1beta1"
+	kappsv1 "k8s.io/api/apps/v1"
+	kappsv1beta1 "k8s.io/api/apps/v1beta1"
+	kappsv1beta2 "k8s.io/api/apps/v1beta2"
 	batchv1 "k8s.io/api/batch/v1"
+	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
 	corev1 "k8s.io/api/core/v1"
-	kapiv1 "k8s.io/api/core/v1"
 	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -93,38 +95,72 @@ func GetPodSpec(obj runtime.Object) (*kapi.PodSpec, *field.Path, error) {
 // GetPodSpecV1 returns a mutable pod spec out of the provided object, including a field path
 // to the field in the object, or an error if the object does not contain a pod spec.
 // This only returns pod specs for v1 compatible objects.
-func GetPodSpecV1(obj runtime.Object) (*kapiv1.PodSpec, *field.Path, error) {
+func GetPodSpecV1(obj runtime.Object) (*corev1.PodSpec, *field.Path, error) {
 	switch r := obj.(type) {
-	case *kapiv1.Pod:
+
+	case *corev1.Pod:
 		return &r.Spec, field.NewPath("spec"), nil
-	case *kapiv1.PodTemplate:
+
+	case *corev1.PodTemplate:
 		return &r.Template.Spec, field.NewPath("template", "spec"), nil
-	case *kapiv1.ReplicationController:
+
+	case *corev1.ReplicationController:
 		if r.Spec.Template != nil {
 			return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
 		}
+
 	case *extensionsv1beta1.DaemonSet:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1.DaemonSet:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1beta2.DaemonSet:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *extensionsv1beta1.Deployment:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1.Deployment:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1beta1.Deployment:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1beta2.Deployment:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *extensionsv1beta1.ReplicaSet:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1.ReplicaSet:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1beta2.ReplicaSet:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *batchv1.Job:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *batchv2alpha1.CronJob:
 		return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil
+	case *batchv1beta1.CronJob:
+		return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil
+
 	case *batchv2alpha1.JobTemplate:
 		return &r.Template.Spec.Template.Spec, field.NewPath("template", "spec", "template", "spec"), nil
-	case *appsv1beta1.StatefulSet:
+	case *batchv1beta1.JobTemplate:
+		return &r.Template.Spec.Template.Spec, field.NewPath("template", "spec", "template", "spec"), nil
+
+	case *kappsv1.StatefulSet:
+		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+	case *kappsv1beta1.StatefulSet:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
-	case *appsv1beta1.Deployment:
+	case *kappsv1beta2.StatefulSet:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *securityv1.PodSecurityPolicySubjectReview:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *securityv1.PodSecurityPolicySelfSubjectReview:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *securityv1.PodSecurityPolicyReview:
 		return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
+
 	case *appsv1.DeploymentConfig:
 		if r.Spec.Template != nil {
 			return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil
@@ -137,68 +173,101 @@ func GetPodSpecV1(obj runtime.Object) (*kapiv1.PodSpec, *field.Path, error) {
 // the object contains, or false if no such object is available.
 func GetTemplateMetaObject(obj runtime.Object) (metav1.Object, bool) {
 	switch r := obj.(type) {
-	case *kapiv1.PodTemplate:
+
+	case *kapi.PodTemplate:
+		return &r.Template.ObjectMeta, true
+	case *corev1.PodTemplate:
 		return &r.Template.ObjectMeta, true
-	case *kapiv1.ReplicationController:
+
+	case *kapi.ReplicationController:
+		if r.Spec.Template != nil {
+			return &r.Spec.Template.ObjectMeta, true
+		}
+	case *corev1.ReplicationController:
 		if r.Spec.Template != nil {
 			return &r.Spec.Template.ObjectMeta, true
 		}
+
+	case *extensions.DaemonSet:
+		return &r.Spec.Template.ObjectMeta, true
 	case *extensionsv1beta1.DaemonSet:
 		return &r.Spec.Template.ObjectMeta, true
-	case *extensionsv1beta1.Deployment:
+	case *kappsv1.DaemonSet:
 		return &r.Spec.Template.ObjectMeta, true
-	case *extensionsv1beta1.ReplicaSet:
+	case *kappsv1beta2.DaemonSet:
 		return &r.Spec.Template.ObjectMeta, true
-	case *batchv1.Job:
+
+	case *extensions.Deployment:
 		return &r.Spec.Template.ObjectMeta, true
-	case *batchv2alpha1.CronJob:
-		return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true
-	case *batchv2alpha1.JobTemplate:
-		return &r.Template.Spec.Template.ObjectMeta, true
-	case *appsv1beta1.StatefulSet:
+	case *extensionsv1beta1.Deployment:
 		return &r.Spec.Template.ObjectMeta, true
-	case *appsv1beta1.Deployment:
+	case *kappsv1.Deployment:
 		return &r.Spec.Template.ObjectMeta, true
-	case *securityv1.PodSecurityPolicySubjectReview:
+	case *kappsv1beta1.Deployment:
 		return &r.Spec.Template.ObjectMeta, true
-	case *securityv1.PodSecurityPolicySelfSubjectReview:
+	case *kappsv1beta2.Deployment:
 		return &r.Spec.Template.ObjectMeta, true
-	case *securityv1.PodSecurityPolicyReview:
+
+	case *extensions.ReplicaSet:
 		return &r.Spec.Template.ObjectMeta, true
-	case *appsv1.DeploymentConfig:
-		if r.Spec.Template != nil {
-			return &r.Spec.Template.ObjectMeta, true
-		}
-	case *kapi.PodTemplate:
-		return &r.Template.ObjectMeta, true
-	case *kapi.ReplicationController:
-		if r.Spec.Template != nil {
-			return &r.Spec.Template.ObjectMeta, true
-		}
-	case *extensions.DaemonSet:
+	case *extensionsv1beta1.ReplicaSet:
 		return &r.Spec.Template.ObjectMeta, true
-	case *extensions.Deployment:
+	case *kappsv1.ReplicaSet:
 		return &r.Spec.Template.ObjectMeta, true
-	case *extensions.ReplicaSet:
+	case *kappsv1beta2.ReplicaSet:
 		return &r.Spec.Template.ObjectMeta, true
+
 	case *batch.Job:
 		return &r.Spec.Template.ObjectMeta, true
+	case *batchv1.Job:
+		return &r.Spec.Template.ObjectMeta, true
+
 	case *batch.CronJob:
 		return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true
+	case *batchv2alpha1.CronJob:
+		return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true
+	case *batchv1beta1.CronJob:
+		return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true
+
 	case *batch.JobTemplate:
 		return &r.Template.Spec.Template.ObjectMeta, true
+	case *batchv2alpha1.JobTemplate:
+		return &r.Template.Spec.Template.ObjectMeta, true
+	case *batchv1beta1.JobTemplate:
+		return &r.Template.Spec.Template.ObjectMeta, true
+
 	case *apps.StatefulSet:
 		return &r.Spec.Template.ObjectMeta, true
+	case *kappsv1.StatefulSet:
+		return &r.Spec.Template.ObjectMeta, true
+	case *kappsv1beta1.StatefulSet:
+		return &r.Spec.Template.ObjectMeta, true
+	case *kappsv1beta2.StatefulSet:
+		return &r.Spec.Template.ObjectMeta, true
+
 	case *securityapi.PodSecurityPolicySubjectReview:
 		return &r.Spec.Template.ObjectMeta, true
+	case *securityv1.PodSecurityPolicySubjectReview:
+		return &r.Spec.Template.ObjectMeta, true
+
 	case *securityapi.PodSecurityPolicySelfSubjectReview:
 		return &r.Spec.Template.ObjectMeta, true
+	case *securityv1.PodSecurityPolicySelfSubjectReview:
+		return &r.Spec.Template.ObjectMeta, true
+
 	case *securityapi.PodSecurityPolicyReview:
 		return &r.Spec.Template.ObjectMeta, true
+	case *securityv1.PodSecurityPolicyReview:
+		return &r.Spec.Template.ObjectMeta, true
+
 	case *appsapi.DeploymentConfig:
 		if r.Spec.Template != nil {
 			return &r.Spec.Template.ObjectMeta, true
 		}
+	case *appsv1.DeploymentConfig:
+		if r.Spec.Template != nil {
+			return &r.Spec.Template.ObjectMeta, true
+		}
 	}
 	return nil, false
 }
@@ -212,7 +281,7 @@ func (m containerMutator) GetImage() string      { return m.Image }
 func (m containerMutator) SetImage(image string) { m.Image = image }
 
 type containerV1Mutator struct {
-	*kapiv1.Container
+	*corev1.Container
 }
 
 func (m containerV1Mutator) GetName() string       { return m.Name }
@@ -322,8 +391,8 @@ func (m *podSpecMutator) GetContainerByIndex(init bool, i int) (ContainerMutator
 
 // podSpecV1Mutator implements the mutation interface over objects with a pod spec.
 type podSpecV1Mutator struct {
-	spec    *kapiv1.PodSpec
-	oldSpec *kapiv1.PodSpec
+	spec    *corev1.PodSpec
+	oldSpec *corev1.PodSpec
 	path    *field.Path
 }
 
@@ -331,7 +400,7 @@ func (m *podSpecV1Mutator) Path() *field.Path {
 	return m.path
 }
 
-func hasIdenticalPodSpecV1Image(spec *kapiv1.PodSpec, containerName, image string) bool {
+func hasIdenticalPodSpecV1Image(spec *corev1.PodSpec, containerName, image string) bool {
 	if spec == nil {
 		return false
 	}
@@ -405,7 +474,7 @@ func (m *podSpecV1Mutator) GetContainerByName(name string) (ContainerMutator, bo
 }
 
 func (m *podSpecV1Mutator) GetContainerByIndex(init bool, i int) (ContainerMutator, bool) {
-	var container *kapiv1.Container
+	var container *corev1.Container
 	spec := m.spec
 	if init {
 		if i < 0 || i >= len(spec.InitContainers) {
diff --git a/pkg/apps/apis/apps/helpers.go b/pkg/apps/apis/apps/helpers.go
index bbd23626d049..68364f53685d 100644
--- a/pkg/apps/apis/apps/helpers.go
+++ b/pkg/apps/apis/apps/helpers.go
@@ -1,11 +1,7 @@
 package apps
 
 import (
-	"fmt"
-
 	kapi "k8s.io/kubernetes/pkg/apis/core"
-
-	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 )
 
 // DeploymentToPodLogOptions builds a PodLogOptions object out of a DeploymentLogOptions.
@@ -25,102 +21,3 @@ func DeploymentToPodLogOptions(opts *DeploymentLogOptions) *kapi.PodLogOptions {
 		LimitBytes:   opts.LimitBytes,
 	}
 }
-
-// TemplateImage is a structure for helping a caller iterate over a PodSpec
-type TemplateImage struct {
-	Image string
-
-	Ref *imageapi.DockerImageReference
-
-	From *kapi.ObjectReference
-
-	Container *kapi.Container
-}
-
-// templateImageForContainer takes a container and returns a TemplateImage.
-func templateImageForContainer(container *kapi.Container, triggerFn TriggeredByFunc) (TemplateImage, error) {
-	var ref imageapi.DockerImageReference
-	if trigger, ok := triggerFn(container); ok {
-		trigger.Image = container.Image
-		trigger.Container = container
-		return trigger, nil
-	}
-	ref, err := imageapi.ParseDockerImageReference(container.Image)
-	if err != nil {
-		return TemplateImage{Image: container.Image, Container: container}, err
-	}
-	return TemplateImage{Image: container.Image, Ref: &ref, Container: container}, nil
-}
-
-// TemplateImageForContainer locates the requested container in a pod spec, returning information about the
-// trigger (if it exists), or an error.
-func TemplateImageForContainer(pod *kapi.PodSpec, triggerFn TriggeredByFunc, containerName string) (TemplateImage, error) {
-	for i := range pod.Containers {
-		container := &pod.Containers[i]
-		if container.Name != containerName {
-			continue
-		}
-		return templateImageForContainer(container, triggerFn)
-	}
-	for i := range pod.InitContainers {
-		container := &pod.InitContainers[i]
-		if container.Name != containerName {
-			continue
-		}
-		return templateImageForContainer(container, triggerFn)
-	}
-	return TemplateImage{}, fmt.Errorf("no container %q found", containerName)
-}
-
-// eachTemplateImage invokes triggerFn and fn on the provided container.
-func eachTemplateImage(container *kapi.Container, triggerFn TriggeredByFunc, fn func(TemplateImage, error)) {
-	image, err := templateImageForContainer(container, triggerFn)
-	fn(image, err)
-}
-
-// EachTemplateImage iterates a pod spec, looking for triggers that match each container and invoking
-// fn with each located image.
-func EachTemplateImage(pod *kapi.PodSpec, triggerFn TriggeredByFunc, fn func(TemplateImage, error)) {
-	for i := range pod.Containers {
-		eachTemplateImage(&pod.Containers[i], triggerFn, fn)
-	}
-	for i := range pod.InitContainers {
-		eachTemplateImage(&pod.InitContainers[i], triggerFn, fn)
-	}
-}
-
-// TriggeredByFunc returns a TemplateImage or error from the provided container
-type TriggeredByFunc func(container *kapi.Container) (TemplateImage, bool)
-
-// IgnoreTriggers ignores the triggers
-func IgnoreTriggers(container *kapi.Container) (TemplateImage, bool) {
-	return TemplateImage{}, false
-}
-
-// DeploymentConfigHasTrigger returns a function that can identify the image for each container.
-func DeploymentConfigHasTrigger(config *DeploymentConfig) TriggeredByFunc {
-	return func(container *kapi.Container) (TemplateImage, bool) {
-		for _, trigger := range config.Spec.Triggers {
-			params := trigger.ImageChangeParams
-			if params == nil {
-				continue
-			}
-			for _, name := range params.ContainerNames {
-				if container.Name == name {
-					if len(params.From.Name) == 0 {
-						continue
-					}
-					from := params.From
-					if len(from.Namespace) == 0 {
-						from.Namespace = config.Namespace
-					}
-					return TemplateImage{
-						Image: container.Image,
-						From:  &from,
-					}, true
-				}
-			}
-		}
-		return TemplateImage{}, false
-	}
-}
diff --git a/pkg/apps/apis/apps/types.go b/pkg/apps/apis/apps/types.go
index 918fd37ebc80..4523feb24037 100644
--- a/pkg/apps/apis/apps/types.go
+++ b/pkg/apps/apis/apps/types.go
@@ -27,9 +27,6 @@ const (
 
 // These constants represent keys used for correlating objects related to deployments.
 const (
-	// DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
-	// DeploymentConfig on which the deployment is based.
-	DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
 	// DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name
 	// of the deployment (a ReplicationController) on which the deployer Pod acts.
 	DeploymentAnnotation = "openshift.io/deployment.name"
@@ -43,23 +40,9 @@ const (
 	// DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded
 	// DeploymentConfig on which a given deployment is based.
 	DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
-	// DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
-	// annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
-	// the deployment.
-	DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
 	// DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state
 	// Used for specifying the reason for cancellation or failure of a deployment
 	DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
-	// DeploymentCancelledAnnotation indicates that the deployment has been cancelled
-	// The annotation value does not matter and its mere presence indicates cancellation
-	DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
-)
-
-// These constants represent values used in deployment annotations.
-const (
-	// DeploymentCancelledAnnotationValue represents the value for the DeploymentCancelledAnnotation
-	// annotation that signifies that the deployment should be cancelled
-	DeploymentCancelledAnnotationValue = "true"
 )
 
 // DeploymentStatus describes the possible states a deployment can be in.
diff --git a/pkg/apps/apis/apps/validation/validation.go b/pkg/apps/apis/apps/validation/validation.go
index bb2f5f80278b..22028f8a39a3 100644
--- a/pkg/apps/apis/apps/validation/validation.go
+++ b/pkg/apps/apis/apps/validation/validation.go
@@ -313,7 +313,7 @@ func validateLifecycleHook(hook *appsapi.LifecycleHook, pod *kapi.PodSpec, fldPa
 			if len(image.ContainerName) == 0 {
 				errs = append(errs, field.Required(fldPath.Child("tagImages").Index(i).Child("containerName"), "a containerName is required"))
 			} else {
-				if _, err := appsapi.TemplateImageForContainer(pod, appsapi.IgnoreTriggers, image.ContainerName); err != nil {
+				if err := verifyTemplateImageForContainer(pod, image.ContainerName); err != nil {
 					errs = append(errs, field.Invalid(fldPath.Child("tagImages").Index(i).Child("containerName"), image.ContainerName, err.Error()))
 				}
 			}
@@ -331,6 +331,24 @@ func validateLifecycleHook(hook *appsapi.LifecycleHook, pod *kapi.PodSpec, fldPa
 	return errs
 }
 
+func verifyTemplateImageForContainer(pod *kapi.PodSpec, containerName string) error {
+	containers := []kapi.Container{}
+	containers = append(containers, pod.Containers...)
+	containers = append(containers, pod.InitContainers...)
+	for i := range containers {
+		container := &containers[i]
+		if container.Name != containerName {
+			continue
+		}
+		if _, err := imageapi.ParseDockerImageReference(container.Image); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	return fmt.Errorf("no container %q found", containerName)
+}
+
 func validateExecNewPod(hook *appsapi.ExecNewPodHook, fldPath *field.Path) field.ErrorList {
 	errs := field.ErrorList{}
 
diff --git a/pkg/apps/apis/apps/zz_generated.deepcopy.go b/pkg/apps/apis/apps/zz_generated.deepcopy.go
index f5ff0b2c9b44..407754f8d3b2 100644
--- a/pkg/apps/apis/apps/zz_generated.deepcopy.go
+++ b/pkg/apps/apis/apps/zz_generated.deepcopy.go
@@ -744,45 +744,3 @@ func (in *TagImageHook) DeepCopy() *TagImageHook {
 	in.DeepCopyInto(out)
 	return out
 }
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TemplateImage) DeepCopyInto(out *TemplateImage) {
-	*out = *in
-	if in.Ref != nil {
-		in, out := &in.Ref, &out.Ref
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = (*in).DeepCopy()
-		}
-	}
-	if in.From != nil {
-		in, out := &in.From, &out.From
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(core.ObjectReference)
-			**out = **in
-		}
-	}
-	if in.Container != nil {
-		in, out := &in.Container, &out.Container
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(core.Container)
-			(*in).DeepCopyInto(*out)
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateImage.
-func (in *TemplateImage) DeepCopy() *TemplateImage {
-	if in == nil {
-		return nil
-	}
-	out := new(TemplateImage)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/pkg/apps/metrics/prometheus/metrics_test.go b/pkg/apps/metrics/prometheus/metrics_test.go
index 3bf830e2bb44..115ef68e2d59 100644
--- a/pkg/apps/metrics/prometheus/metrics_test.go
+++ b/pkg/apps/metrics/prometheus/metrics_test.go
@@ -9,12 +9,12 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 
-	kapiv1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kcorelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsutil "github.com/openshift/origin/pkg/apps/util"
 )
 
 var (
@@ -22,9 +22,9 @@ var (
 	defaultTimeNowFn = func() time.Time { return timeNow.Time }
 )
 
-func mockRC(name string, version int, annotations map[string]string, generation int64, creationTime metav1.Time) *kapiv1.ReplicationController {
-	r := &kapiv1.ReplicationController{}
-	annotations[appsapi.DeploymentConfigAnnotation] = name
+func mockRC(name string, version int, annotations map[string]string, generation int64, creationTime metav1.Time) *corev1.ReplicationController {
+	r := &corev1.ReplicationController{}
+	annotations[appsutil.DeploymentConfigAnnotation] = name
 	r.SetName(name + fmt.Sprintf("-%d", version))
 	r.SetNamespace("test")
 	r.SetCreationTimestamp(creationTime)
@@ -36,7 +36,7 @@ func TestCollect(t *testing.T) {
 	tests := []struct {
 		name  string
 		count int
-		rcs   []*kapiv1.ReplicationController
+		rcs   []*corev1.ReplicationController
 		// expected values
 		available     float64
 		failed        float64
@@ -50,7 +50,7 @@ func TestCollect(t *testing.T) {
 			available: 0,
 			failed:    0,
 			cancelled: 0,
-			rcs:       []*kapiv1.ReplicationController{},
+			rcs:       []*corev1.ReplicationController{},
 		},
 		{
 			name:      "single successful deployment",
@@ -58,9 +58,9 @@ func TestCollect(t *testing.T) {
 			available: 1,
 			failed:    0,
 			cancelled: 0,
-			rcs: []*kapiv1.ReplicationController{
+			rcs: []*corev1.ReplicationController{
 				mockRC("foo", 1, map[string]string{
-					appsapi.DeploymentStatusAnnotation: string(appsapi.DeploymentStatusComplete),
+					appsutil.DeploymentStatusAnnotation: string(appsutil.DeploymentStatusComplete),
 				}, 0, timeNow),
 			},
 		},
@@ -72,11 +72,11 @@ func TestCollect(t *testing.T) {
 			cancelled:     1,
 			latestVersion: "1",
 			timestamp:     float64(timeNow.Unix()),
-			rcs: []*kapiv1.ReplicationController{
+			rcs: []*corev1.ReplicationController{
 				mockRC("foo", 1, map[string]string{
-					appsapi.DeploymentCancelledAnnotation: appsapi.DeploymentCancelledAnnotationValue,
-					appsapi.DeploymentStatusAnnotation:    string(appsapi.DeploymentStatusFailed),
-					appsapi.DeploymentVersionAnnotation:   "1",
+					appsutil.DeploymentCancelledAnnotation: appsutil.DeploymentCancelledAnnotationValue,
+					appsutil.DeploymentStatusAnnotation:    string(appsutil.DeploymentStatusFailed),
+					appsutil.DeploymentVersionAnnotation:   "1",
 				}, 0, timeNow),
 			},
 		},
@@ -88,10 +88,10 @@ func TestCollect(t *testing.T) {
 			cancelled:     0,
 			latestVersion: "1",
 			timestamp:     float64(timeNow.Unix()),
-			rcs: []*kapiv1.ReplicationController{
+			rcs: []*corev1.ReplicationController{
 				mockRC("foo", 1, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusFailed),
-					appsapi.DeploymentVersionAnnotation: "1",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusFailed),
+					appsutil.DeploymentVersionAnnotation: "1",
 				}, 0, timeNow),
 			},
 		},
@@ -103,22 +103,22 @@ func TestCollect(t *testing.T) {
 			cancelled:     0,
 			latestVersion: "4",
 			timestamp:     float64(timeNow.Unix()),
-			rcs: []*kapiv1.ReplicationController{
+			rcs: []*corev1.ReplicationController{
 				mockRC("foo", 1, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusFailed),
-					appsapi.DeploymentVersionAnnotation: "1",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusFailed),
+					appsutil.DeploymentVersionAnnotation: "1",
 				}, 0, timeNow),
 				mockRC("foo", 2, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusFailed),
-					appsapi.DeploymentVersionAnnotation: "2",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusFailed),
+					appsutil.DeploymentVersionAnnotation: "2",
 				}, 0, timeNow),
 				mockRC("foo", 3, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusFailed),
-					appsapi.DeploymentVersionAnnotation: "3",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusFailed),
+					appsutil.DeploymentVersionAnnotation: "3",
 				}, 0, timeNow),
 				mockRC("foo", 4, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusFailed),
-					appsapi.DeploymentVersionAnnotation: "4",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusFailed),
+					appsutil.DeploymentVersionAnnotation: "4",
 				}, 0, timeNow),
 			},
 		},
@@ -130,18 +130,18 @@ func TestCollect(t *testing.T) {
 			cancelled:     0,
 			latestVersion: "2",
 			timestamp:     float64(timeNow.Unix()),
-			rcs: []*kapiv1.ReplicationController{
+			rcs: []*corev1.ReplicationController{
 				mockRC("foo", 1, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusComplete),
-					appsapi.DeploymentVersionAnnotation: "1",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusComplete),
+					appsutil.DeploymentVersionAnnotation: "1",
 				}, 0, timeNow),
 				mockRC("foo", 2, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusFailed),
-					appsapi.DeploymentVersionAnnotation: "2",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusFailed),
+					appsutil.DeploymentVersionAnnotation: "2",
 				}, 0, timeNow),
 				mockRC("foo", 3, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusComplete),
-					appsapi.DeploymentVersionAnnotation: "3",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusComplete),
+					appsutil.DeploymentVersionAnnotation: "3",
 				}, 0, timeNow),
 			},
 		},
@@ -155,10 +155,10 @@ func TestCollect(t *testing.T) {
 			// the timestamp is duration in this case, which is 0 as the creation time
 			// and current time are the same.
 			timestamp: 0,
-			rcs: []*kapiv1.ReplicationController{
+			rcs: []*corev1.ReplicationController{
 				mockRC("foo", 1, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusRunning),
-					appsapi.DeploymentVersionAnnotation: "1",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusRunning),
+					appsutil.DeploymentVersionAnnotation: "1",
 				}, 0, timeNow),
 			},
 		},
@@ -172,18 +172,18 @@ func TestCollect(t *testing.T) {
 			// the timestamp is duration in this case, which is 0 as the creation time
 			// and current time are the same.
 			timestamp: 0,
-			rcs: []*kapiv1.ReplicationController{
+			rcs: []*corev1.ReplicationController{
 				mockRC("foo", 1, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusComplete),
-					appsapi.DeploymentVersionAnnotation: "1",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusComplete),
+					appsutil.DeploymentVersionAnnotation: "1",
 				}, 0, timeNow),
 				mockRC("foo", 2, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusComplete),
-					appsapi.DeploymentVersionAnnotation: "2",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusComplete),
+					appsutil.DeploymentVersionAnnotation: "2",
 				}, 0, timeNow),
 				mockRC("foo", 3, map[string]string{
-					appsapi.DeploymentStatusAnnotation:  string(appsapi.DeploymentStatusRunning),
-					appsapi.DeploymentVersionAnnotation: "3",
+					appsutil.DeploymentStatusAnnotation:  string(appsutil.DeploymentStatusRunning),
+					appsutil.DeploymentVersionAnnotation: "3",
 				}, 0, timeNow),
 			},
 		},
diff --git a/pkg/apps/util/const.go b/pkg/apps/util/const.go
index cc033662376b..c30d677af64e 100644
--- a/pkg/apps/util/const.go
+++ b/pkg/apps/util/const.go
@@ -4,7 +4,6 @@ package util
 type DeploymentStatus string
 
 const (
-
 	// TODO: Should move to openshift/api
 	// DeploymentStatusNew means the deployment has been accepted but not yet acted upon.
 	DeploymentStatusNew DeploymentStatus = "New"
@@ -101,4 +100,17 @@ const (
 	deploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
 
 	deploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
+
+	// DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
+	// annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
+	// the deployment.
+	DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
+
+	// DeploymentCancelledAnnotation indicates that the deployment has been cancelled
+	// The annotation value does not matter and its mere presence indicates cancellation
+	DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
+
+	// DeploymentCancelledAnnotationValue represents the value for the DeploymentCancelledAnnotation
+	// annotation that signifies that the deployment should be cancelled
+	DeploymentCancelledAnnotationValue = "true"
 )
diff --git a/pkg/image/apis/image/v1/conversion.go b/pkg/image/apis/image/v1/conversion.go
index 0b70cbb09813..3e4ec564961c 100644
--- a/pkg/image/apis/image/v1/conversion.go
+++ b/pkg/image/apis/image/v1/conversion.go
@@ -28,10 +28,7 @@ func init() {
 
 // The docker metadata must be cast to a version
 func Convert_image_Image_To_v1_Image(in *newer.Image, out *v1.Image, s conversion.Scope) error {
-	if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
-		return err
-	}
-
+	out.ObjectMeta = in.ObjectMeta
 	out.DockerImageReference = in.DockerImageReference
 	out.DockerImageManifest = in.DockerImageManifest
 	out.DockerImageManifestMediaType = in.DockerImageManifestMediaType
@@ -91,10 +88,7 @@ func Convert_image_Image_To_v1_Image(in *newer.Image, out *v1.Image, s conversio
 }
 
 func Convert_v1_Image_To_image_Image(in *v1.Image, out *newer.Image, s conversion.Scope) error {
-	if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
-		return err
-	}
-
+	out.ObjectMeta = in.ObjectMeta
 	out.DockerImageReference = in.DockerImageReference
 	out.DockerImageManifest = in.DockerImageManifest
 	out.DockerImageManifestMediaType = in.DockerImageManifestMediaType
diff --git a/pkg/image/apiserver/admission/limitrange/admission.go b/pkg/image/apiserver/admission/limitrange/admission.go
index bba7e46e1454..99e0354f5f0d 100644
--- a/pkg/image/apiserver/admission/limitrange/admission.go
+++ b/pkg/image/apiserver/admission/limitrange/admission.go
@@ -140,7 +140,7 @@ func (a *imageLimitRangerPlugin) ValidateLimit(limitRange *kapi.LimitRange, kind
 	}
 
 	image := &isObj.Image
-	if err := util.ImageWithMetadata(image); err != nil {
+	if err := util.InternalImageWithMetadata(image); err != nil {
 		return err
 	}
 
diff --git a/pkg/image/apiserver/registry/image/strategy.go b/pkg/image/apiserver/registry/image/strategy.go
index 07d00fee49aa..5db798c9fd9c 100644
--- a/pkg/image/apiserver/registry/image/strategy.go
+++ b/pkg/image/apiserver/registry/image/strategy.go
@@ -45,7 +45,7 @@ func (imageStrategy) NamespaceScoped() bool {
 func (s imageStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
 	newImage := obj.(*imageapi.Image)
 	// ignore errors, change in place
-	if err := util.ImageWithMetadata(newImage); err != nil {
+	if err := util.InternalImageWithMetadata(newImage); err != nil {
 		utilruntime.HandleError(fmt.Errorf("Unable to update image metadata for %q: %v", newImage.Name, err))
 	}
 	if newImage.Annotations[imageapi.ImageManifestBlobStoredAnnotation] == "true" {
@@ -124,7 +124,7 @@ func (s imageStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Ob
 		}
 	}
 
-	if err = util.ImageWithMetadata(newImage); err != nil {
+	if err = util.InternalImageWithMetadata(newImage); err != nil {
 		utilruntime.HandleError(fmt.Errorf("Unable to update image metadata for %q: %v", newImage.Name, err))
 	}
 
diff --git a/pkg/image/apiserver/registry/imagestreamimage/rest.go b/pkg/image/apiserver/registry/imagestreamimage/rest.go
index ffdb4088917a..727fb826d6ac 100644
--- a/pkg/image/apiserver/registry/imagestreamimage/rest.go
+++ b/pkg/image/apiserver/registry/imagestreamimage/rest.go
@@ -93,7 +93,7 @@ func (r *REST) Get(ctx context.Context, id string, options *metav1.GetOptions) (
 	if err != nil {
 		return nil, err
 	}
-	if err := util.ImageWithMetadata(image); err != nil {
+	if err := util.InternalImageWithMetadata(image); err != nil {
 		return nil, err
 	}
 	image.DockerImageManifest = ""
diff --git a/pkg/image/apiserver/registry/imagestreamimport/rest.go b/pkg/image/apiserver/registry/imagestreamimport/rest.go
index eee6d573e007..1c4e8a4da813 100644
--- a/pkg/image/apiserver/registry/imagestreamimport/rest.go
+++ b/pkg/image/apiserver/registry/imagestreamimport/rest.go
@@ -520,7 +520,7 @@ func (r *REST) importSuccessful(
 	updated, err := r.images.Create(ctx, image, rest.ValidateAllObjectFunc, false)
 	switch {
 	case kapierrors.IsAlreadyExists(err):
-		if err := util.ImageWithMetadata(image); err != nil {
+		if err := util.InternalImageWithMetadata(image); err != nil {
 			glog.V(4).Infof("Unable to update image metadata during image import when image already exists %q: %v", image.Name, err)
 		}
 		updated = image
diff --git a/pkg/image/apiserver/registry/imagestreamtag/rest.go b/pkg/image/apiserver/registry/imagestreamtag/rest.go
index c34a7d41bbcd..c0c228bcba24 100644
--- a/pkg/image/apiserver/registry/imagestreamtag/rest.go
+++ b/pkg/image/apiserver/registry/imagestreamtag/rest.go
@@ -444,7 +444,7 @@ func newISTag(tag string, imageStream *imageapi.ImageStream, image *imageapi.Ima
 	}
 
 	if image != nil {
-		if err := util.ImageWithMetadata(image); err != nil {
+		if err := util.InternalImageWithMetadata(image); err != nil {
 			return nil, err
 		}
 		image.DockerImageManifest = ""
diff --git a/pkg/image/importer/importer.go b/pkg/image/importer/importer.go
index 875bd9c0022b..131794a66af0 100644
--- a/pkg/image/importer/importer.go
+++ b/pkg/image/importer/importer.go
@@ -444,7 +444,7 @@ func (isi *ImageStreamImporter) importManifest(ctx gocontext.Context, manifest d
 		return
 	}
 
-	if err := util.ImageWithMetadata(image); err != nil {
+	if err := util.InternalImageWithMetadata(image); err != nil {
 		return image, err
 	}
 
diff --git a/pkg/image/util/helpers.go b/pkg/image/util/helpers.go
index d7e19418651a..7529186b7021 100644
--- a/pkg/image/util/helpers.go
+++ b/pkg/image/util/helpers.go
@@ -4,31 +4,36 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"k8s.io/apimachinery/pkg/util/sets"
-
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/manifest/schema2"
 	"github.com/golang/glog"
 	godigest "github.com/opencontainers/go-digest"
 
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	dockerv10 "github.com/openshift/api/image/docker10"
 	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	"github.com/openshift/origin/pkg/image/apis/image/docker10"
+	dockerapi10 "github.com/openshift/origin/pkg/image/apis/image/docker10"
 )
 
-func getImageLayers(manifest docker10.DockerImageManifest) ([]imageapi.ImageLayer, error) {
-	var imageLayers []imageapi.ImageLayer
+func fillImageLayers(image *imageapi.Image, manifest dockerapi10.DockerImageManifest) error {
+	if len(image.DockerImageLayers) != 0 {
+		// DockerImageLayers is already filled by the registry.
+		return nil
+	}
+
 	switch manifest.SchemaVersion {
 	case 1:
 		if len(manifest.History) != len(manifest.FSLayers) {
-			return nil, fmt.Errorf("mismatched history and fslayer cardinality (%d != %d)", len(manifest.History), len(manifest.FSLayers))
+			return fmt.Errorf("the image %s (%s) has mismatched history and fslayer cardinality (%d != %d)", image.Name, image.DockerImageReference, len(manifest.History), len(manifest.FSLayers))
 		}
 
-		imageLayers = make([]imageapi.ImageLayer, len(manifest.FSLayers))
+		image.DockerImageLayers = make([]imageapi.ImageLayer, len(manifest.FSLayers))
 		for i, obj := range manifest.History {
 			layer := manifest.FSLayers[i]
 
-			var size docker10.DockerV1CompatibilityImageSize
+			var size dockerapi10.DockerV1CompatibilityImageSize
 			if err := json.Unmarshal([]byte(obj.DockerV1Compatibility), &size); err != nil {
 				size.Size = 0
 			}
@@ -39,192 +44,170 @@ func getImageLayers(manifest docker10.DockerImageManifest) ([]imageapi.ImageLaye
 			// in order from the oldest to the youngest.
 			revidx := (len(manifest.History) - 1) - i // n-1, n-2, ..., 1, 0
 
-			imageLayers[revidx].Name = layer.DockerBlobSum
-			imageLayers[revidx].LayerSize = size.Size
-			imageLayers[revidx].MediaType = schema1.MediaTypeManifestLayer
+			image.DockerImageLayers[revidx].Name = layer.DockerBlobSum
+			image.DockerImageLayers[revidx].LayerSize = size.Size
+			image.DockerImageLayers[revidx].MediaType = schema1.MediaTypeManifestLayer
 		}
 	case 2:
 		// The layer list is ordered starting from the base image (opposite order of schema1).
 		// So, we do not need to change the order of layers.
-		imageLayers = make([]imageapi.ImageLayer, len(manifest.Layers))
+		image.DockerImageLayers = make([]imageapi.ImageLayer, len(manifest.Layers))
 		for i, layer := range manifest.Layers {
-			imageLayers[i].Name = layer.Digest
-			imageLayers[i].LayerSize = layer.Size
-			imageLayers[i].MediaType = layer.MediaType
+			image.DockerImageLayers[i].Name = layer.Digest
+			image.DockerImageLayers[i].LayerSize = layer.Size
+			image.DockerImageLayers[i].MediaType = layer.MediaType
 		}
 	default:
-		return nil, fmt.Errorf("unrecognized Docker image manifest schema %d", manifest.SchemaVersion)
-	}
-
-	return imageLayers, nil
-}
-
-// reorderImageLayers mutates the given image. It reorders the layers in ascending order.
-// Ascending order matches the order of layers in schema 2. Schema 1 has reversed (descending) order of layers.
-func reorderImageLayers(imageLayers []imageapi.ImageLayer, layersOrder, imageManifestMediaType string) bool {
-	if imageLayers == nil || len(imageLayers) == 0 {
-		return false
-	}
-
-	if layersOrder == "" {
-		switch imageManifestMediaType {
-		case schema1.MediaTypeManifest, schema1.MediaTypeSignedManifest:
-			layersOrder = imageapi.DockerImageLayersOrderAscending
-		case schema2.MediaTypeManifest:
-			layersOrder = imageapi.DockerImageLayersOrderDescending
-		default:
-			return false
-		}
-	}
-
-	if layersOrder == imageapi.DockerImageLayersOrderDescending {
-		// reverse order of the layers (lowest = 0, highest = i)
-		for i, j := 0, len(imageLayers)-1; i < j; i, j = i+1, j-1 {
-			imageLayers[i], imageLayers[j] = imageLayers[j], imageLayers[i]
-		}
+		return fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, image.Name, image.DockerImageReference)
 	}
 
-	return true
-}
-
-func convertImageLayers(imageLayers []imagev1.ImageLayer) []imageapi.ImageLayer {
-	if imageLayers == nil {
-		return nil
-	}
-
-	result := make([]imageapi.ImageLayer, len(imageLayers))
-	for i := range imageLayers {
-		result[i].MediaType = imageLayers[i].MediaType
-		result[i].Name = imageLayers[i].Name
-		result[i].LayerSize = imageLayers[i].LayerSize
-	}
-	return result
-}
-
-func GetImageMetadata(image *imagev1.Image) (imageapi.DockerImage, error) {
-	if len(image.DockerImageManifest) == 0 {
-		return imageapi.DockerImage{}, nil
+	if image.Annotations == nil {
+		image.Annotations = map[string]string{}
 	}
+	image.Annotations[imageapi.DockerImageLayersOrderAnnotation] = imageapi.DockerImageLayersOrderAscending
 
-	imageLayers := convertImageLayers(image.DockerImageLayers)
-	reorderImageLayers(imageLayers, image.Annotations[imageapi.DockerImageLayersOrderAnnotation], image.DockerImageManifestMediaType)
-
-	_, imageMetadata, _, _, err := getImageMetadata(image.Name, image.DockerImageReference,
-		image.DockerImageManifest, image.DockerImageConfig, imageLayers)
-	return imageMetadata, err
-
+	return nil
 }
 
-// ImageWithMetadata mutates the given image. It parses raw DockerImageManifest data stored in the image and
+// InternalImageWithMetadata mutates the given image. It parses raw DockerImageManifest data stored in the image and
 // fills its DockerImageMetadata and other fields.
-func ImageWithMetadata(image *imageapi.Image) error {
+func InternalImageWithMetadata(image *imageapi.Image) error {
 	if len(image.DockerImageManifest) == 0 {
 		return nil
 	}
 
-	if ok := reorderImageLayers(image.DockerImageLayers,
-		image.Annotations[imageapi.DockerImageLayersOrderAnnotation], image.DockerImageManifestMediaType); ok {
-		if image.Annotations == nil {
-			image.Annotations = map[string]string{}
-		}
-		image.Annotations[imageapi.DockerImageLayersOrderAnnotation] = imageapi.DockerImageLayersOrderAscending
-	}
+	ReorderImageLayers(image)
 
 	if len(image.DockerImageLayers) > 0 && image.DockerImageMetadata.Size > 0 && len(image.DockerImageManifestMediaType) > 0 {
 		glog.V(5).Infof("Image metadata already filled for %s", image.Name)
 		return nil
 	}
-	imageManifestMediaType, imageMetadata, imageLayers, orderAscending, err := getImageMetadata(image.Name, image.DockerImageReference,
-		image.DockerImageManifest, image.DockerImageConfig, image.DockerImageLayers)
-	if err != nil {
-		return err
-	}
-	image.DockerImageManifestMediaType = imageManifestMediaType
-	image.DockerImageMetadata = imageMetadata
-	image.DockerImageLayers = imageLayers
-	if orderAscending {
-		if image.Annotations == nil {
-			image.Annotations = map[string]string{}
-		}
-		image.Annotations[imageapi.DockerImageLayersOrderAnnotation] = imageapi.DockerImageLayersOrderAscending
-	}
-
-	return nil
-}
 
-func getImageMetadata(imageName, imageReference, imageManifest, imageConfig string,
-	imageLayers []imageapi.ImageLayer) (string, imageapi.DockerImage, []imageapi.ImageLayer, bool, error) {
-	manifest := docker10.DockerImageManifest{}
-	if err := json.Unmarshal([]byte(imageManifest), &manifest); err != nil {
-		return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, err
+	manifest := dockerapi10.DockerImageManifest{}
+	if err := json.Unmarshal([]byte(image.DockerImageManifest), &manifest); err != nil {
+		return err
 	}
 
-	var err error
-	var orderAscending bool
-	if len(imageLayers) == 0 {
-		imageLayers, err = getImageLayers(manifest)
-		if err != nil {
-			return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, fmt.Errorf("the image %s (%s) failed reading layers: %v", imageName, imageReference, err)
-		}
-		orderAscending = true
+	err := fillImageLayers(image, manifest)
+	if err != nil {
+		return err
 	}
 
-	var imageManifestMediaType string
-	var imageMetadata imageapi.DockerImage
 	switch manifest.SchemaVersion {
 	case 1:
-		imageManifestMediaType = schema1.MediaTypeManifest
+		image.DockerImageManifestMediaType = schema1.MediaTypeManifest
 
 		if len(manifest.History) == 0 {
 			// It should never have an empty history, but just in case.
-			return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, fmt.Errorf("the image %s (%s) has a schema 1 manifest, but it doesn't have history", imageName, imageReference)
+			return fmt.Errorf("the image %s (%s) has a schema 1 manifest, but it doesn't have history", image.Name, image.DockerImageReference)
 		}
 
-		v1Metadata := docker10.DockerV1CompatibilityImage{}
+		v1Metadata := dockerapi10.DockerV1CompatibilityImage{}
 		if err := json.Unmarshal([]byte(manifest.History[0].DockerV1Compatibility), &v1Metadata); err != nil {
-			return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, err
+			return err
 		}
 
-		if err := imageapi.Convert_compatibility_to_api_DockerImage(&v1Metadata, &imageMetadata); err != nil {
-			return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, err
+		if err := imageapi.Convert_compatibility_to_api_DockerImage(&v1Metadata, &image.DockerImageMetadata); err != nil {
+			return err
 		}
 	case 2:
-		imageManifestMediaType = schema2.MediaTypeManifest
+		image.DockerImageManifestMediaType = schema2.MediaTypeManifest
 
-		if len(imageConfig) == 0 {
-			return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, fmt.Errorf("dockerImageConfig must not be empty for manifest schema 2")
+		if len(image.DockerImageConfig) == 0 {
+			return fmt.Errorf("dockerImageConfig must not be empty for manifest schema 2")
 		}
 
-		config := docker10.DockerImageConfig{}
-		if err := json.Unmarshal([]byte(imageConfig), &config); err != nil {
-			return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, fmt.Errorf("failed to parse dockerImageConfig: %v", err)
+		config := dockerapi10.DockerImageConfig{}
+		if err := json.Unmarshal([]byte(image.DockerImageConfig), &config); err != nil {
+			return fmt.Errorf("failed to parse dockerImageConfig: %v", err)
 		}
 
-		if err := imageapi.Convert_imageconfig_to_api_DockerImage(&config, &imageMetadata); err != nil {
-			return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, err
+		if err := imageapi.Convert_imageconfig_to_api_DockerImage(&config, &image.DockerImageMetadata); err != nil {
+			return err
 		}
-		imageMetadata.ID = manifest.Config.Digest
+		image.DockerImageMetadata.ID = manifest.Config.Digest
 
 	default:
-		return "", imageapi.DockerImage{}, []imageapi.ImageLayer{}, false, fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, imageName, imageReference)
+		return fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, image.Name, image.DockerImageReference)
 	}
 
 	layerSet := sets.NewString()
 	if manifest.SchemaVersion == 2 {
 		layerSet.Insert(manifest.Config.Digest)
-		imageMetadata.Size = int64(len(imageConfig))
+		image.DockerImageMetadata.Size = int64(len(image.DockerImageConfig))
 	} else {
-		imageMetadata.Size = 0
+		image.DockerImageMetadata.Size = 0
 	}
-	for _, layer := range imageLayers {
+	for _, layer := range image.DockerImageLayers {
 		if layerSet.Has(layer.Name) {
 			continue
 		}
 		layerSet.Insert(layer.Name)
-		imageMetadata.Size += layer.LayerSize
+		image.DockerImageMetadata.Size += layer.LayerSize
+	}
+
+	return nil
+}
+
+// ImageWithMetadata mutates the given image. It parses the raw DockerImageMetadata stored in the image
+// and fills its DockerImageMetadata.Object and DockerImageMetadataVersion fields.
+// Copied from github.com/openshift/image-registry/pkg/origin-common/util/util.go
+func ImageWithMetadata(image *imagev1.Image) error {
+	// Check if the metadata are already filled in for this image.
+	meta, hasMetadata := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
+	if hasMetadata && meta.Size > 0 {
+		return nil
+	}
+
+	version := image.DockerImageMetadataVersion
+	if len(version) == 0 {
+		version = "1.0"
+	}
+
+	obj := &dockerv10.DockerImage{}
+	if len(image.DockerImageMetadata.Raw) != 0 {
+		if err := json.Unmarshal(image.DockerImageMetadata.Raw, obj); err != nil {
+			return err
+		}
+		image.DockerImageMetadata.Object = obj
+	}
+
+	image.DockerImageMetadataVersion = version
+
+	return nil
+}
+
+// ReorderImageLayers mutates the given image. It reorders the layers in ascending order.
+// Ascending order matches the order of layers in schema 2. Schema 1 has reversed (descending) order of layers.
+func ReorderImageLayers(image *imageapi.Image) {
+	if len(image.DockerImageLayers) == 0 {
+		return
+	}
+
+	layersOrder, ok := image.Annotations[imageapi.DockerImageLayersOrderAnnotation]
+	if !ok {
+		switch image.DockerImageManifestMediaType {
+		case schema1.MediaTypeManifest, schema1.MediaTypeSignedManifest:
+			layersOrder = imageapi.DockerImageLayersOrderAscending
+		case schema2.MediaTypeManifest:
+			layersOrder = imageapi.DockerImageLayersOrderDescending
+		default:
+			return
+		}
+	}
+
+	if layersOrder == imageapi.DockerImageLayersOrderDescending {
+		// reverse order of the layers (lowest = 0, highest = i)
+		for i, j := 0, len(image.DockerImageLayers)-1; i < j; i, j = i+1, j-1 {
+			image.DockerImageLayers[i], image.DockerImageLayers[j] = image.DockerImageLayers[j], image.DockerImageLayers[i]
+		}
+	}
+
+	if image.Annotations == nil {
+		image.Annotations = map[string]string{}
 	}
 
-	return imageManifestMediaType, imageMetadata, imageLayers, orderAscending, nil
+	image.Annotations[imageapi.DockerImageLayersOrderAnnotation] = imageapi.DockerImageLayersOrderAscending
 }
 
 // ManifestMatchesImage returns true if the provided manifest matches the name of the image.
diff --git a/pkg/image/util/helpers_test.go b/pkg/image/util/helpers_test.go
index c7d48a8fa94d..a8da7b662589 100644
--- a/pkg/image/util/helpers_test.go
+++ b/pkg/image/util/helpers_test.go
@@ -211,7 +211,7 @@ func TestImageWithMetadata(t *testing.T) {
 
 	for name, test := range tests {
 		imageWithMetadata := test.image
-		err := ImageWithMetadata(&imageWithMetadata)
+		err := InternalImageWithMetadata(&imageWithMetadata)
 		gotError := err != nil
 		if e, a := test.expectError, gotError; e != a {
 			t.Fatalf("%s: expectError=%t, gotError=%t: %s", name, e, a, err)
diff --git a/pkg/oc/cli/admin/buildchain/buildchain.go b/pkg/oc/cli/admin/buildchain/buildchain.go
index bc988becabc1..de54ea422e13 100644
--- a/pkg/oc/cli/admin/buildchain/buildchain.go
+++ b/pkg/oc/cli/admin/buildchain/buildchain.go
@@ -15,16 +15,13 @@ import (
 	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
 
 	"github.com/openshift/api/image"
-	buildclientinternal "github.com/openshift/origin/pkg/build/generated/internalclientset"
-	buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset/typed/build/internalversion"
+	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
+	projectv1client "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1"
 	osutil "github.com/openshift/origin/pkg/cmd/util"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	imageclientinternal "github.com/openshift/origin/pkg/image/generated/internalclientset"
-	imageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/typed/image/internalversion"
 	"github.com/openshift/origin/pkg/oc/lib/describe"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
-	projectclientinternal "github.com/openshift/origin/pkg/project/generated/internalclientset"
-	projectclient "github.com/openshift/origin/pkg/project/generated/internalclientset/typed/project/internalversion"
 )
 
 // BuildChainRecommendedCommandName is the recommended command name
@@ -61,9 +58,9 @@ type BuildChainOptions struct {
 
 	output string
 
-	buildClient   buildclient.BuildConfigsGetter
-	imageClient   imageclient.ImageStreamTagsGetter
-	projectClient projectclient.ProjectsGetter
+	buildClient   buildv1client.BuildV1Interface
+	imageClient   imagev1client.ImageV1Interface
+	projectClient projectv1client.ProjectV1Interface
 }
 
 // NewCmdBuildChain implements the OpenShift experimental build-chain command
@@ -100,21 +97,18 @@ func (o *BuildChainOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, arg
 	if err != nil {
 		return err
 	}
-	buildClient, err := buildclientinternal.NewForConfig(clientConfig)
+	o.buildClient, err = buildv1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	imageClient, err := imageclientinternal.NewForConfig(clientConfig)
+	o.imageClient, err = imagev1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	projectClient, err := projectclientinternal.NewForConfig(clientConfig)
+	o.projectClient, err = projectv1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	o.buildClient = buildClient.Build()
-	o.imageClient = imageClient.Image()
-	o.projectClient = projectClient.Project()
 
 	resource := schema.GroupResource{}
 	mapper, err := f.ToRESTMapper()
@@ -147,14 +141,12 @@ func (o *BuildChainOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, arg
 		}
 	}
 
-	namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
+	o.defaultNamespace, _, err = f.ToRawKubeConfigLoader().Namespace()
 	if err != nil {
 		return err
 	}
-
-	o.defaultNamespace = namespace
 	glog.V(4).Infof("Using %q as the namespace for %q", o.defaultNamespace, o.name)
-	o.namespaces.Insert(namespace)
+	o.namespaces.Insert(o.defaultNamespace)
 	glog.V(4).Infof("Will look for deps in %s", strings.Join(o.namespaces.List(), ","))
 
 	return nil
diff --git a/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/aggregated_logging/deploymentconfigs.go b/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/aggregated_logging/deploymentconfigs.go
index 78d0798cf726..a5aeb3300764 100644
--- a/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/aggregated_logging/deploymentconfigs.go
+++ b/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/aggregated_logging/deploymentconfigs.go
@@ -10,7 +10,7 @@ import (
 	kapi "k8s.io/kubernetes/pkg/apis/core"
 
 	appsv1 "github.com/openshift/api/apps/v1"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsutil "github.com/openshift/origin/pkg/apps/util"
 )
 
 const (
@@ -113,7 +113,7 @@ func checkDeploymentConfigPods(r diagnosticReporter, adapter deploymentConfigAda
 
 	for _, pod := range podList.Items {
 		r.Debug("AGL0082", fmt.Sprintf("Checking status of Pod '%s'...", pod.ObjectMeta.Name))
-		dcName, hasDcName := pod.ObjectMeta.Annotations[appsapi.DeploymentConfigAnnotation]
+		dcName, hasDcName := pod.ObjectMeta.Annotations[appsutil.DeploymentConfigAnnotation]
 		if !hasDcName {
 			r.Warn("AGL0085", nil, fmt.Sprintf("Found Pod '%s' that that does not reference a logging deployment config which may be acceptable. Skipping check to see if its running.", pod.ObjectMeta.Name))
 			continue
diff --git a/pkg/oc/cli/admin/prune/deployments/data.go b/pkg/oc/cli/admin/prune/deployments/data.go
index eacec50ae55f..ea4f704c716a 100644
--- a/pkg/oc/cli/admin/prune/deployments/data.go
+++ b/pkg/oc/cli/admin/prune/deployments/data.go
@@ -9,7 +9,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 
 	appsv1 "github.com/openshift/api/apps/v1"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
 	appsutil "github.com/openshift/origin/pkg/apps/util"
 )
 
@@ -146,7 +145,7 @@ func (d *dataSet) ListDeploymentsByDeploymentConfig(deploymentConfig *appsv1.Dep
 	key := &corev1.ReplicationController{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace:   deploymentConfig.Namespace,
-			Annotations: map[string]string{appsapi.DeploymentConfigAnnotation: deploymentConfig.Name},
+			Annotations: map[string]string{appsutil.DeploymentConfigAnnotation: deploymentConfig.Name},
 		},
 	}
 	items, err := d.deploymentIndexer.Index("deploymentConfig", key)
diff --git a/pkg/oc/cli/admin/prune/deployments/data_test.go b/pkg/oc/cli/admin/prune/deployments/data_test.go
index bb742b01a1ac..da1511e8540b 100644
--- a/pkg/oc/cli/admin/prune/deployments/data_test.go
+++ b/pkg/oc/cli/admin/prune/deployments/data_test.go
@@ -11,6 +11,7 @@ import (
 
 	appsv1 "github.com/openshift/api/apps/v1"
 	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsutil "github.com/openshift/origin/pkg/apps/util"
 )
 
 func mockDeploymentConfig(namespace, name string) *appsv1.DeploymentConfig {
@@ -29,7 +30,7 @@ func withCreated(item *corev1.ReplicationController, creationTimestamp metav1.Ti
 }
 
 func withStatus(item *corev1.ReplicationController, status appsapi.DeploymentStatus) *corev1.ReplicationController {
-	item.Annotations[appsapi.DeploymentStatusAnnotation] = string(status)
+	item.Annotations[appsutil.DeploymentStatusAnnotation] = string(status)
 	return item
 }
 
@@ -40,9 +41,9 @@ func mockDeployment(namespace, name string, deploymentConfig *appsv1.DeploymentC
 		Spec:       corev1.ReplicationControllerSpec{Replicas: &zero},
 	}
 	if deploymentConfig != nil {
-		item.Annotations[appsapi.DeploymentConfigAnnotation] = deploymentConfig.Name
+		item.Annotations[appsutil.DeploymentConfigAnnotation] = deploymentConfig.Name
 	}
-	item.Annotations[appsapi.DeploymentStatusAnnotation] = string(appsapi.DeploymentStatusNew)
+	item.Annotations[appsutil.DeploymentStatusAnnotation] = string(appsutil.DeploymentStatusNew)
 	return item
 }
 
@@ -132,7 +133,7 @@ func TestPopulatedDataSet(t *testing.T) {
 	dataSet := NewDataSet(deploymentConfigs, deployments)
 	for _, deployment := range deployments {
 		deploymentConfig, exists, err := dataSet.GetDeploymentConfig(deployment)
-		config, hasConfig := deployment.Annotations[appsapi.DeploymentConfigAnnotation]
+		config, hasConfig := deployment.Annotations[appsutil.DeploymentConfigAnnotation]
 		if hasConfig {
 			if err != nil {
 				t.Errorf("Item %v, unexpected error: %v", deployment, err)
diff --git a/pkg/oc/cli/admin/prune/imageprune/helper.go b/pkg/oc/cli/admin/prune/imageprune/helper.go
index c4b764f762ab..5ec7ec892e74 100644
--- a/pkg/oc/cli/admin/prune/imageprune/helper.go
+++ b/pkg/oc/cli/admin/prune/imageprune/helper.go
@@ -14,16 +14,16 @@ import (
 	kmeta "k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	kapiref "k8s.io/kubernetes/pkg/api/ref"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	ref "k8s.io/client-go/tools/reference"
+	"k8s.io/kubernetes/pkg/kubectl/scheme"
 
+	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	"github.com/openshift/origin/pkg/util/netutils"
 )
 
 // order younger images before older
-type imgByAge []*imageapi.Image
+type imgByAge []*imagev1.Image
 
 func (ba imgByAge) Len() int      { return len(ba) }
 func (ba imgByAge) Swap(i, j int) { ba[i], ba[j] = ba[j], ba[i] }
@@ -32,7 +32,7 @@ func (ba imgByAge) Less(i, j int) bool {
 }
 
 // order younger image stream before older
-type isByAge []imageapi.ImageStream
+type isByAge []imagev1.ImageStream
 
 func (ba isByAge) Len() int      { return len(ba) }
 func (ba isByAge) Swap(i, j int) { ba[i], ba[j] = ba[j], ba[i] }
@@ -42,9 +42,9 @@ func (ba isByAge) Less(i, j int) bool {
 
 // DetermineRegistryHost returns registry host embedded in a pull-spec of the latest unmanaged image or the
 // latest imagestream from the provided lists. If no such pull-spec is found, error is returned.
-func DetermineRegistryHost(images *imageapi.ImageList, imageStreams *imageapi.ImageStreamList) (string, error) {
+func DetermineRegistryHost(images *imagev1.ImageList, imageStreams *imagev1.ImageStreamList) (string, error) {
 	var pullSpec string
-	var managedImages []*imageapi.Image
+	var managedImages []*imagev1.Image
 
 	// 1st try to determine registry url from a pull spec of the youngest managed image
 	for i := range images.Items {
@@ -294,24 +294,11 @@ func getKindName(obj *corev1.ObjectReference) string {
 	return fmt.Sprintf("%s[%s]", obj.Kind, name)
 }
 
-func getInternalRef(obj runtime.Object) *kapi.ObjectReference {
-	ref, err := kapiref.GetReference(legacyscheme.Scheme, obj)
-	if err != nil {
-		glog.Errorf("failed to get reference to object %T: %v", obj, err)
-		return nil
-	}
-	return ref
-}
-
 func getRef(obj runtime.Object) *corev1.ObjectReference {
-	ref, err := kapiref.GetReference(legacyscheme.Scheme, obj)
+	ref, err := ref.GetReference(scheme.Scheme, obj)
 	if err != nil {
 		glog.Errorf("failed to get reference to object %T: %v", obj, err)
 		return nil
 	}
-	result := &corev1.ObjectReference{}
-	if err := legacyscheme.Scheme.Convert(ref, result, nil); err != nil {
-		panic(err)
-	}
-	return result
+	return ref
 }
diff --git a/pkg/oc/cli/admin/prune/imageprune/prune.go b/pkg/oc/cli/admin/prune/imageprune/prune.go
index 26051ec124c8..1ecc063096e8 100644
--- a/pkg/oc/cli/admin/prune/imageprune/prune.go
+++ b/pkg/oc/cli/admin/prune/imageprune/prune.go
@@ -15,9 +15,9 @@ import (
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/golang/glog"
 	gonum "github.com/gonum/graph"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
 
+	kappsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	kerrapi "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,16 +26,15 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/util/retry"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	kapisext "k8s.io/kubernetes/pkg/apis/extensions"
 
 	appsv1 "github.com/openshift/api/apps/v1"
 	buildv1 "github.com/openshift/api/build/v1"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
+	dockerv10 "github.com/openshift/api/image/docker10"
+	imagev1 "github.com/openshift/api/image/v1"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
 	"github.com/openshift/origin/pkg/build/buildapihelpers"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	imageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/typed/image/internalversion"
+	imageutil "github.com/openshift/origin/pkg/image/util"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	buildgraph "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/nodes"
 	"github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
@@ -94,18 +93,18 @@ type pruneAlgorithm struct {
 // ImageDeleter knows how to remove images from OpenShift.
 type ImageDeleter interface {
 	// DeleteImage removes the image from OpenShift's storage.
-	DeleteImage(image *imageapi.Image) error
+	DeleteImage(image *imagev1.Image) error
 }
 
 // ImageStreamDeleter knows how to remove an image reference from an image stream.
 type ImageStreamDeleter interface {
 	// GetImageStream returns a fresh copy of an image stream.
-	GetImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error)
+	GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error)
 	// UpdateImageStream removes all references to the image from the image
 	// stream's status.tags. The updated image stream is returned.
-	UpdateImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error)
+	UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error)
 	// NotifyImageStreamPrune shows notification about updated image stream.
-	NotifyImageStreamPrune(stream *imageapi.ImageStream, updatedTags []string, deletedTags []string)
+	NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string)
 }
 
 // BlobDeleter knows how to delete a blob from the Docker registry.
@@ -151,34 +150,34 @@ type PrunerOptions struct {
 	Namespace string
 	// Images is the entire list of images in OpenShift. An image must be in this
 	// list to be a candidate for pruning.
-	Images *imageapi.ImageList
+	Images *imagev1.ImageList
 	// ImageWatcher watches for image changes.
 	ImageWatcher watch.Interface
 	// Streams is the entire list of image streams across all namespaces in the
 	// cluster.
-	Streams *imageapi.ImageStreamList
+	Streams *imagev1.ImageStreamList
 	// StreamWatcher watches for stream changes.
 	StreamWatcher watch.Interface
 	// Pods is the entire list of pods across all namespaces in the cluster.
-	Pods *kapi.PodList
+	Pods *corev1.PodList
 	// RCs is the entire list of replication controllers across all namespaces in
 	// the cluster.
-	RCs *kapi.ReplicationControllerList
+	RCs *corev1.ReplicationControllerList
 	// BCs is the entire list of build configs across all namespaces in the
 	// cluster.
-	BCs *buildapi.BuildConfigList
+	BCs *buildv1.BuildConfigList
 	// Builds is the entire list of builds across all namespaces in the cluster.
-	Builds *buildapi.BuildList
+	Builds *buildv1.BuildList
 	// DSs is the entire list of daemon sets across all namespaces in the cluster.
-	DSs *kapisext.DaemonSetList
+	DSs *kappsv1.DaemonSetList
 	// Deployments is the entire list of kube's deployments across all namespaces in the cluster.
-	Deployments *kapisext.DeploymentList
+	Deployments *kappsv1.DeploymentList
 	// DCs is the entire list of deployment configs across all namespaces in the cluster.
 	DCs *appsv1.DeploymentConfigList
 	// RSs is the entire list of replica sets across all namespaces in the cluster.
-	RSs *kapisext.ReplicaSetList
+	RSs *kappsv1.ReplicaSetList
 	// LimitRanges is a map of LimitRanges across namespaces, being keys in this map.
-	LimitRanges map[string][]*kapi.LimitRange
+	LimitRanges map[string][]*corev1.LimitRange
 	// DryRun indicates that no changes will be made to the cluster and nothing
 	// will be removed.
 	DryRun bool
@@ -217,7 +216,7 @@ type pruner struct {
 	registryURL           *url.URL
 	imageWatcher          watch.Interface
 	imageStreamWatcher    watch.Interface
-	imageStreamLimits     map[string][]*kapi.LimitRange
+	imageStreamLimits     map[string][]*corev1.LimitRange
 	// sorted queue of images to prune; nil stands for empty queue
 	queue *nodeItem
 	// contains prunable images removed from queue that are currently being processed
@@ -349,15 +348,27 @@ func getValue(option interface{}) string {
 }
 
 // addImagesToGraph adds all images, their manifests and their layers to the graph.
-func (p *pruner) addImagesToGraph(images *imageapi.ImageList) []error {
+func (p *pruner) addImagesToGraph(images *imagev1.ImageList) []error {
+	var errs []error
 	for i := range images.Items {
 		image := &images.Items[i]
 
 		glog.V(4).Infof("Adding image %q to graph", image.Name)
 		imageNode := imagegraph.EnsureImageNode(p.g, image)
 
-		if image.DockerImageManifestMediaType == schema2.MediaTypeManifest && len(image.DockerImageMetadata.ID) > 0 {
-			configName := image.DockerImageMetadata.ID
+		if err := imageutil.ImageWithMetadata(image); err != nil {
+			glog.V(1).Infof("Failed to read image metadata for image %s: %v", image.Name, err)
+			errs = append(errs, err)
+			continue
+		}
+		dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
+		if !ok {
+			glog.V(1).Infof("Failed to read image metadata for image %s", image.Name)
+			errs = append(errs, fmt.Errorf("Failed to read image metadata for image %s", image.Name))
+			continue
+		}
+		if image.DockerImageManifestMediaType == schema2.MediaTypeManifest && len(dockerImage.ID) > 0 {
+			configName := dockerImage.ID
 			glog.V(4).Infof("Adding image config %q to graph", configName)
 			configNode := imagegraph.EnsureImageComponentConfigNode(p.g, configName)
 			p.g.AddEdge(imageNode, configNode, ReferencedImageConfigEdgeKind)
@@ -374,7 +385,7 @@ func (p *pruner) addImagesToGraph(images *imageapi.ImageList) []error {
 		p.g.AddEdge(imageNode, manifestNode, ReferencedImageManifestEdgeKind)
 	}
 
-	return nil
+	return errs
 }
 
 // addImageStreamsToGraph adds all the streams to the graph. The most recent n
@@ -390,7 +401,7 @@ func (p *pruner) addImagesToGraph(images *imageapi.ImageList) []error {
 //
 // addImageStreamsToGraph also adds references from each stream to all the
 // layers it references (via each image a stream references).
-func (p *pruner) addImageStreamsToGraph(streams *imageapi.ImageStreamList, limits map[string][]*kapi.LimitRange) []error {
+func (p *pruner) addImageStreamsToGraph(streams *imagev1.ImageStreamList, limits map[string][]*corev1.LimitRange) []error {
 	for i := range streams.Items {
 		stream := &streams.Items[i]
 
@@ -408,14 +419,14 @@ func (p *pruner) addImageStreamsToGraph(streams *imageapi.ImageStreamList, limit
 		isNode := imagegraph.EnsureImageStreamNode(p.g, stream)
 		imageStreamNode := isNode.(*imagegraph.ImageStreamNode)
 
-		for tag, history := range stream.Status.Tags {
-			istNode := imagegraph.EnsureImageStreamTagNode(p.g, makeISTagWithStream(stream, tag))
+		for _, tag := range stream.Status.Tags {
+			istNode := imagegraph.EnsureImageStreamTagNode(p.g, makeISTagWithStream(stream, tag.Tag))
 
-			for i, tagEvent := range history.Items {
-				imageNode := imagegraph.FindImage(p.g, history.Items[i].Image)
+			for i, tagEvent := range tag.Items {
+				imageNode := imagegraph.FindImage(p.g, tag.Items[i].Image)
 				if imageNode == nil {
 					glog.V(2).Infof("Unable to find image %q in graph (from tag=%q, revision=%d, dockerImageReference=%s) - skipping",
-						history.Items[i].Image, tag, tagEvent.Generation, history.Items[i].DockerImageReference)
+						tag.Items[i].Image, tag.Tag, tagEvent.Generation, tag.Items[i].DockerImageReference)
 					continue
 				}
 
@@ -474,23 +485,30 @@ func (p *pruner) addImageStreamsToGraph(streams *imageapi.ImageStreamList, limit
 }
 
 // exceedsLimits checks if given image exceeds LimitRanges defined in ImageStream's namespace.
-func exceedsLimits(is *imageapi.ImageStream, image *imageapi.Image, limits map[string][]*kapi.LimitRange) bool {
+func exceedsLimits(is *imagev1.ImageStream, image *imagev1.Image, limits map[string][]*corev1.LimitRange) bool {
 	limitRanges, ok := limits[is.Namespace]
 	if !ok || len(limitRanges) == 0 {
 		return false
 	}
 
-	imageSize := resource.NewQuantity(image.DockerImageMetadata.Size, resource.BinarySI)
+	if err := imageutil.ImageWithMetadata(image); err != nil {
+		return false
+	}
+	dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
+	if !ok {
+		return false
+	}
+	imageSize := resource.NewQuantity(dockerImage.Size, resource.BinarySI)
 	for _, limitRange := range limitRanges {
 		if limitRange == nil {
 			continue
 		}
 		for _, limit := range limitRange.Spec.Limits {
-			if limit.Type != imageapi.LimitTypeImage {
+			if limit.Type != imagev1.LimitTypeImage {
 				continue
 			}
 
-			limitQuantity, ok := limit.Max[kapi.ResourceStorage]
+			limitQuantity, ok := limit.Max[corev1.ResourceStorage]
 			if !ok {
 				continue
 			}
@@ -509,7 +527,7 @@ func exceedsLimits(is *imageapi.ImageStream, image *imageapi.Image, limits map[s
 //
 // Edges are added to the graph from each pod to the images specified by that
 // pod's list of containers, as long as the image is managed by OpenShift.
-func (p *pruner) addPodsToGraph(pods *kapi.PodList) []error {
+func (p *pruner) addPodsToGraph(pods *corev1.PodList) []error {
 	var errs []error
 
 	for i := range pods.Items {
@@ -521,7 +539,7 @@ func (p *pruner) addPodsToGraph(pods *kapi.PodList) []error {
 		// A pod is only *excluded* from being added to the graph if its phase is not
 		// pending or running. Additionally, it has to be at least as old as the minimum
 		// age threshold defined by the algorithm.
-		if pod.Status.Phase != kapi.PodRunning && pod.Status.Phase != kapi.PodPending {
+		if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodPending {
 			if !pod.CreationTimestamp.Time.After(p.algorithm.keepYoungerThan) {
 				glog.V(4).Infof("Ignoring %s for image reference counting because it's not running/pending and is too old", desc)
 				continue
@@ -540,7 +558,7 @@ func (p *pruner) addPodsToGraph(pods *kapi.PodList) []error {
 // Edges are added to the graph from each predecessor (pod or replication
 // controller) to the images specified by the pod spec's list of containers, as
 // long as the image is managed by OpenShift.
-func (p *pruner) addPodSpecToGraph(referrer *corev1.ObjectReference, spec *kapi.PodSpec, predecessor gonum.Node) []error {
+func (p *pruner) addPodSpecToGraph(referrer *corev1.ObjectReference, spec *corev1.PodSpec, predecessor gonum.Node) []error {
 	var errs []error
 
 	for j := range spec.Containers {
@@ -605,7 +623,7 @@ func (p *pruner) addPodSpecToGraph(referrer *corev1.ObjectReference, spec *kapi.
 // Edges are added to the graph from each replication controller to the images
 // specified by its pod spec's list of containers, as long as the image is
 // managed by OpenShift.
-func (p *pruner) addReplicationControllersToGraph(rcs *kapi.ReplicationControllerList) []error {
+func (p *pruner) addReplicationControllersToGraph(rcs *corev1.ReplicationControllerList) []error {
 	var errs []error
 
 	for i := range rcs.Items {
@@ -623,7 +641,7 @@ func (p *pruner) addReplicationControllersToGraph(rcs *kapi.ReplicationControlle
 //
 // Edges are added to the graph from each daemon set to the images specified by its pod spec's list of
 // containers, as long as the image is managed by OpenShift.
-func (p *pruner) addDaemonSetsToGraph(dss *kapisext.DaemonSetList) []error {
+func (p *pruner) addDaemonSetsToGraph(dss *kappsv1.DaemonSetList) []error {
 	var errs []error
 
 	for i := range dss.Items {
@@ -641,7 +659,7 @@ func (p *pruner) addDaemonSetsToGraph(dss *kapisext.DaemonSetList) []error {
 //
 // Edges are added to the graph from each deployment to the images specified by its pod spec's list of
 // containers, as long as the image is managed by OpenShift.
-func (p *pruner) addDeploymentsToGraph(dmnts *kapisext.DeploymentList) []error {
+func (p *pruner) addDeploymentsToGraph(dmnts *kappsv1.DeploymentList) []error {
 	var errs []error
 
 	for i := range dmnts.Items {
@@ -667,13 +685,8 @@ func (p *pruner) addDeploymentConfigsToGraph(dcs *appsv1.DeploymentConfigList) [
 		dc := &dcs.Items[i]
 		ref := getRef(dc)
 		glog.V(4).Infof("Examining %s", getKindName(ref))
-		internalDc := &appsapi.DeploymentConfig{}
-		if err := legacyscheme.Scheme.Convert(dc, internalDc, nil); err != nil {
-			errs = append(errs, err)
-			continue
-		}
-		dcNode := appsgraph.EnsureDeploymentConfigNode(p.g, internalDc)
-		errs = append(errs, p.addPodSpecToGraph(getRef(dc), &internalDc.Spec.Template.Spec, dcNode)...)
+		dcNode := appsgraph.EnsureDeploymentConfigNode(p.g, dc)
+		errs = append(errs, p.addPodSpecToGraph(getRef(dc), &dc.Spec.Template.Spec, dcNode)...)
 	}
 
 	return errs
@@ -683,7 +696,7 @@ func (p *pruner) addDeploymentConfigsToGraph(dcs *appsv1.DeploymentConfigList) [
 //
 // Edges are added to the graph from each replica set to the images specified by its pod spec's list of
 // containers, as long as the image is managed by OpenShift.
-func (p *pruner) addReplicaSetsToGraph(rss *kapisext.ReplicaSetList) []error {
+func (p *pruner) addReplicaSetsToGraph(rss *kappsv1.ReplicaSetList) []error {
 	var errs []error
 
 	for i := range rss.Items {
@@ -700,7 +713,7 @@ func (p *pruner) addReplicaSetsToGraph(rss *kapisext.ReplicaSetList) []error {
 // addBuildConfigsToGraph adds build configs to the graph.
 //
 // Edges are added to the graph from each build config to the image specified by its strategy.from.
-func (p *pruner) addBuildConfigsToGraph(bcs *buildapi.BuildConfigList) []error {
+func (p *pruner) addBuildConfigsToGraph(bcs *buildv1.BuildConfigList) []error {
 	var errs []error
 
 	for i := range bcs.Items {
@@ -717,7 +730,7 @@ func (p *pruner) addBuildConfigsToGraph(bcs *buildapi.BuildConfigList) []error {
 // addBuildsToGraph adds builds to the graph.
 //
 // Edges are added to the graph from each build to the image specified by its strategy.from.
-func (p *pruner) addBuildsToGraph(builds *buildapi.BuildList) []error {
+func (p *pruner) addBuildsToGraph(builds *buildv1.BuildList) []error {
 	var errs []error
 
 	for i := range builds.Items {
@@ -756,12 +769,8 @@ func (p *pruner) resolveISTagName(g genericgraph.Graph, referrer *corev1.ObjectR
 // Edges are added to the graph from each predecessor (build or build config)
 // to the image specified by strategy.from, as long as the image is managed by
 // OpenShift.
-func (p *pruner) addBuildStrategyImageReferencesToGraph(referrer *corev1.ObjectReference, strategy buildapi.BuildStrategy, predecessor gonum.Node) []error {
-	externalStrategy := buildv1.BuildStrategy{}
-	if err := legacyscheme.Scheme.Convert(&strategy, &externalStrategy, nil); err != nil {
-		return []error{fmt.Errorf("unable to convert strategy: %v", err)}
-	}
-	from := buildapihelpers.GetInputReference(externalStrategy)
+func (p *pruner) addBuildStrategyImageReferencesToGraph(referrer *corev1.ObjectReference, strategy buildv1.BuildStrategy, predecessor gonum.Node) []error {
+	from := buildapihelpers.GetInputReference(strategy)
 	if from == nil {
 		glog.V(4).Infof("Unable to determine 'from' reference - skipping")
 		return nil
@@ -840,8 +849,8 @@ func (p *pruner) addBuildStrategyImageReferencesToGraph(referrer *corev1.ObjectR
 }
 
 func (p *pruner) handleImageStreamEvent(event watch.Event) {
-	getIsNode := func() (*imageapi.ImageStream, *imagegraph.ImageStreamNode) {
-		is, ok := event.Object.(*imageapi.ImageStream)
+	getIsNode := func() (*imagev1.ImageStream, *imagegraph.ImageStreamNode) {
+		is, ok := event.Object.(*imagev1.ImageStream)
 		if !ok {
 			utilruntime.HandleError(fmt.Errorf("internal error: expected ImageStream object in %s event, not %T", event.Type, event.Object))
 			return nil, nil
@@ -866,7 +875,7 @@ func (p *pruner) handleImageStreamEvent(event watch.Event) {
 			return
 		}
 		glog.V(4).Infof("Adding ImageStream %s to the graph", getName(is))
-		p.addImageStreamsToGraph(&imageapi.ImageStreamList{Items: []imageapi.ImageStream{*is}}, p.imageStreamLimits)
+		p.addImageStreamsToGraph(&imagev1.ImageStreamList{Items: []imagev1.ImageStream{*is}}, p.imageStreamLimits)
 
 	case watch.Modified:
 		is, isNode := getIsNode()
@@ -881,13 +890,13 @@ func (p *pruner) handleImageStreamEvent(event watch.Event) {
 		}
 
 		glog.V(4).Infof("Adding updated ImageStream %s back to the graph", getName(is))
-		p.addImageStreamsToGraph(&imageapi.ImageStreamList{Items: []imageapi.ImageStream{*is}}, p.imageStreamLimits)
+		p.addImageStreamsToGraph(&imagev1.ImageStreamList{Items: []imagev1.ImageStream{*is}}, p.imageStreamLimits)
 	}
 }
 
 func (p *pruner) handleImageEvent(event watch.Event) {
-	getImageNode := func() (*imageapi.Image, *imagegraph.ImageNode) {
-		img, ok := event.Object.(*imageapi.Image)
+	getImageNode := func() (*imagev1.Image, *imagegraph.ImageNode) {
+		img, ok := event.Object.(*imagev1.Image)
 		if !ok {
 			utilruntime.HandleError(fmt.Errorf("internal error: expected Image object in %s event, not %T", event.Type, event.Object))
 			return nil, nil
@@ -908,7 +917,7 @@ func (p *pruner) handleImageEvent(event watch.Event) {
 			return
 		}
 		glog.V(4).Infof("Adding new Image %s to the graph", img.Name)
-		p.addImagesToGraph(&imageapi.ImageList{Items: []imageapi.Image{*img}})
+		p.addImagesToGraph(&imagev1.ImageList{Items: []imagev1.Image{*img}})
 
 	case watch.Deleted:
 		img, imgNode := getImageNode()
@@ -1023,11 +1032,11 @@ func pruneStreams(
 			updatedTags := sets.NewString()
 			deletedTags := sets.NewString()
 
-			for tag := range stream.Status.Tags {
-				if updated, deleted := pruneISTagHistory(g, imageNameToNode, keepYoungerThan, streamName, stream, tag); deleted {
-					deletedTags.Insert(tag)
+			for _, tag := range stream.Status.Tags {
+				if updated, deleted := pruneISTagHistory(g, imageNameToNode, keepYoungerThan, streamName, stream, tag.Tag); deleted {
+					deletedTags.Insert(tag.Tag)
 				} else if updated {
-					updatedTags.Insert(tag)
+					updatedTags.Insert(tag.Tag)
 				}
 			}
 
@@ -1094,11 +1103,17 @@ func pruneISTagHistory(
 	prunableImageNodes map[string]*imagegraph.ImageNode,
 	keepYoungerThan time.Time,
 	streamName string,
-	imageStream *imageapi.ImageStream,
+	imageStream *imagev1.ImageStream,
 	tag string,
 ) (tagUpdated, tagDeleted bool) {
-	history := imageStream.Status.Tags[tag]
-	newHistory := imageapi.TagEventList{}
+	var history imagev1.NamedTagEventList
+	for _, t := range imageStream.Status.Tags {
+		if t.Tag == tag {
+			history = t
+			break
+		}
+	}
+	newHistory := imagev1.NamedTagEventList{Tag: tag}
 
 	for _, tagEvent := range history.Items {
 		glog.V(4).Infof("Checking image stream tag %s:%s generation %d with image %q", streamName, tag, tagEvent.Generation, tagEvent.Image)
@@ -1114,18 +1129,31 @@ func pruneISTagHistory(
 
 	if len(newHistory.Items) == 0 {
 		glog.V(4).Infof("Image stream tag %s:%s - removing empty tag", streamName, tag)
-		delete(imageStream.Status.Tags, tag)
+		tags := []imagev1.NamedTagEventList{}
+		for i := range imageStream.Status.Tags {
+			t := imageStream.Status.Tags[i]
+			if t.Tag != tag {
+				tags = append(tags, t)
+			}
+		}
+		imageStream.Status.Tags = tags
 		tagDeleted = true
 		tagUpdated = false
 	} else if tagUpdated {
-		imageStream.Status.Tags[tag] = newHistory
+		for i := range imageStream.Status.Tags {
+			t := imageStream.Status.Tags[i]
+			if t.Tag == tag {
+				imageStream.Status.Tags[i] = newHistory
+				break
+			}
+		}
 	}
 
 	return
 }
 
 func tagEventIsPrunable(
-	tagEvent imageapi.TagEvent,
+	tagEvent imagev1.TagEvent,
 	g genericgraph.Graph,
 	prunableImageNodes map[string]*imagegraph.ImageNode,
 	keepYoungerThan time.Time,
@@ -1582,42 +1610,42 @@ func streamsReferencingImageComponent(g genericgraph.Graph, cn *imagegraph.Image
 
 // imageDeleter removes an image from OpenShift.
 type imageDeleter struct {
-	images imageclient.ImagesGetter
+	images imagev1client.ImagesGetter
 }
 
 var _ ImageDeleter = &imageDeleter{}
 
 // NewImageDeleter creates a new imageDeleter.
-func NewImageDeleter(images imageclient.ImagesGetter) ImageDeleter {
+func NewImageDeleter(images imagev1client.ImagesGetter) ImageDeleter {
 	return &imageDeleter{
 		images: images,
 	}
 }
 
-func (p *imageDeleter) DeleteImage(image *imageapi.Image) error {
+func (p *imageDeleter) DeleteImage(image *imagev1.Image) error {
 	glog.V(4).Infof("Deleting image %q", image.Name)
 	return p.images.Images().Delete(image.Name, metav1.NewDeleteOptions(0))
 }
 
 // imageStreamDeleter updates an image stream in OpenShift.
 type imageStreamDeleter struct {
-	streams imageclient.ImageStreamsGetter
+	streams imagev1client.ImageStreamsGetter
 }
 
 var _ ImageStreamDeleter = &imageStreamDeleter{}
 
 // NewImageStreamDeleter creates a new imageStreamDeleter.
-func NewImageStreamDeleter(streams imageclient.ImageStreamsGetter) ImageStreamDeleter {
+func NewImageStreamDeleter(streams imagev1client.ImageStreamsGetter) ImageStreamDeleter {
 	return &imageStreamDeleter{
 		streams: streams,
 	}
 }
 
-func (p *imageStreamDeleter) GetImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) {
+func (p *imageStreamDeleter) GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
 	return p.streams.ImageStreams(stream.Namespace).Get(stream.Name, metav1.GetOptions{})
 }
 
-func (p *imageStreamDeleter) UpdateImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) {
+func (p *imageStreamDeleter) UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
 	glog.V(4).Infof("Updating ImageStream %s", getName(stream))
 	is, err := p.streams.ImageStreams(stream.Namespace).UpdateStatus(stream)
 	if err == nil {
@@ -1627,7 +1655,7 @@ func (p *imageStreamDeleter) UpdateImageStream(stream *imageapi.ImageStream) (*i
 }
 
 // NotifyImageStreamPrune shows notification about updated image stream.
-func (p *imageStreamDeleter) NotifyImageStreamPrune(stream *imageapi.ImageStream, updatedTags []string, deletedTags []string) {
+func (p *imageStreamDeleter) NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string) {
 	return
 }
 
@@ -1719,8 +1747,8 @@ func (p *manifestDeleter) DeleteManifest(registryClient *http.Client, registryUR
 	return deleteFromRegistry(registryClient, fmt.Sprintf("%s/v2/%s/manifests/%s", registryURL.String(), repoName, manifest))
 }
 
-func makeISTag(namespace, name, tag string) *imageapi.ImageStreamTag {
-	return &imageapi.ImageStreamTag{
+func makeISTag(namespace, name, tag string) *imagev1.ImageStreamTag {
+	return &imagev1.ImageStreamTag{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      imageapi.JoinImageStreamTag(name, tag),
@@ -1728,6 +1756,6 @@ func makeISTag(namespace, name, tag string) *imageapi.ImageStreamTag {
 	}
 }
 
-func makeISTagWithStream(is *imageapi.ImageStream, tag string) *imageapi.ImageStreamTag {
+func makeISTagWithStream(is *imagev1.ImageStream, tag string) *imagev1.ImageStreamTag {
 	return makeISTag(is.Namespace, is.Name, tag)
 }
diff --git a/pkg/oc/cli/admin/prune/imageprune/prune_test.go b/pkg/oc/cli/admin/prune/imageprune/prune_test.go
index f38405023517..84aa68f8c9d8 100644
--- a/pkg/oc/cli/admin/prune/imageprune/prune_test.go
+++ b/pkg/oc/cli/admin/prune/imageprune/prune_test.go
@@ -17,31 +17,28 @@ import (
 
 	"github.com/golang/glog"
 
+	kappsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/rest/fake"
-	clientgotesting "k8s.io/client-go/testing"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	kapisext "k8s.io/kubernetes/pkg/apis/extensions"
+	clienttesting "k8s.io/client-go/testing"
+	"k8s.io/kubernetes/pkg/kubectl/scheme"
 
+	"github.com/openshift/api"
 	appsv1 "github.com/openshift/api/apps/v1"
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
+	buildv1 "github.com/openshift/api/build/v1"
+	imagev1 "github.com/openshift/api/image/v1"
+	fakeimageclient "github.com/openshift/client-go/image/clientset/versioned/fake"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
+	fakeimagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	fakeimageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/fake"
-	imageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/typed/image/internalversion"
 	"github.com/openshift/origin/pkg/oc/cli/admin/prune/imageprune/testutil"
 	"github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
-
-	// these are needed to make kapiref.GetReference work in the prune.go file
-	_ "github.com/openshift/origin/pkg/apps/apis/apps/install"
-	_ "github.com/openshift/origin/pkg/build/apis/build/install"
-	_ "github.com/openshift/origin/pkg/image/apis/image/install"
-	_ "k8s.io/kubernetes/pkg/apis/core/install"
-	_ "k8s.io/kubernetes/pkg/apis/extensions/install"
 )
 
 var logLevel = flag.Int("loglevel", 0, "")
@@ -59,17 +56,17 @@ func TestImagePruning(t *testing.T) {
 		ignoreInvalidRefs             *bool
 		keepTagRevisions              *int
 		namespace                     string
-		images                        imageapi.ImageList
-		pods                          kapi.PodList
-		streams                       imageapi.ImageStreamList
-		rcs                           kapi.ReplicationControllerList
-		bcs                           buildapi.BuildConfigList
-		builds                        buildapi.BuildList
-		dss                           kapisext.DaemonSetList
-		deployments                   kapisext.DeploymentList
+		images                        imagev1.ImageList
+		pods                          corev1.PodList
+		streams                       imagev1.ImageStreamList
+		rcs                           corev1.ReplicationControllerList
+		bcs                           buildv1.BuildConfigList
+		builds                        buildv1.BuildList
+		dss                           kappsv1.DaemonSetList
+		deployments                   kappsv1.DeploymentList
 		dcs                           appsv1.DeploymentConfigList
-		rss                           kapisext.ReplicaSetList
-		limits                        map[string][]*kapi.LimitRange
+		rss                           kappsv1.ReplicaSetList
+		limits                        map[string][]*corev1.LimitRange
 		imageDeleterErr               error
 		imageStreamDeleterErr         error
 		layerDeleterErr               error
@@ -86,7 +83,7 @@ func TestImagePruning(t *testing.T) {
 		{
 			name:   "1 pod - phase pending - don't prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:   testutil.PodList(testutil.Pod("foo", "pod1", kapi.PodPending, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
+			pods:   testutil.PodList(testutil.Pod("foo", "pod1", corev1.PodPending, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			expectedImageDeletions: []string{},
 		},
 
@@ -94,9 +91,9 @@ func TestImagePruning(t *testing.T) {
 			name:   "3 pods - last phase pending - don't prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			pods: testutil.PodList(
-				testutil.Pod("foo", "pod1", kapi.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod2", kapi.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod3", kapi.PodPending, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod2", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod3", corev1.PodPending, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 			),
 			expectedImageDeletions: []string{},
 		},
@@ -104,7 +101,7 @@ func TestImagePruning(t *testing.T) {
 		{
 			name:   "1 pod - phase running - don't prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:   testutil.PodList(testutil.Pod("foo", "pod1", kapi.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
+			pods:   testutil.PodList(testutil.Pod("foo", "pod1", corev1.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			expectedImageDeletions: []string{},
 		},
 
@@ -112,9 +109,9 @@ func TestImagePruning(t *testing.T) {
 			name:   "3 pods - last phase running - don't prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			pods: testutil.PodList(
-				testutil.Pod("foo", "pod1", kapi.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod2", kapi.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod3", kapi.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod2", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod3", corev1.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 			),
 			expectedImageDeletions: []string{},
 		},
@@ -122,7 +119,7 @@ func TestImagePruning(t *testing.T) {
 		{
 			name:   "pod phase succeeded - prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:   testutil.PodList(testutil.Pod("foo", "pod1", kapi.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
+			pods:   testutil.PodList(testutil.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 			expectedBlobDeletions: []string{
 				registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000000",
@@ -138,7 +135,7 @@ func TestImagePruning(t *testing.T) {
 			name:          "pod phase succeeded - prune leave registry alone",
 			pruneRegistry: newBool(false),
 			images:        testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:          testutil.PodList(testutil.Pod("foo", "pod1", kapi.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
+			pods:          testutil.PodList(testutil.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 			expectedBlobDeletions:  []string{},
 		},
@@ -146,14 +143,14 @@ func TestImagePruning(t *testing.T) {
 		{
 			name:   "pod phase succeeded, pod less than min pruning age - don't prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
-			pods:   testutil.PodList(testutil.AgedPod("foo", "pod1", kapi.PodSucceeded, 5, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
+			pods:   testutil.PodList(testutil.AgedPod("foo", "pod1", corev1.PodSucceeded, 5, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			expectedImageDeletions: []string{},
 		},
 
 		{
 			name:   "pod phase succeeded, image less than min pruning age - don't prune",
 			images: testutil.ImageList(testutil.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", 5)),
-			pods:   testutil.PodList(testutil.Pod("foo", "pod1", kapi.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
+			pods:   testutil.PodList(testutil.Pod("foo", "pod1", corev1.PodSucceeded, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			expectedImageDeletions: []string{},
 		},
 
@@ -161,9 +158,9 @@ func TestImagePruning(t *testing.T) {
 			name:   "pod phase failed - prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			pods: testutil.PodList(
-				testutil.Pod("foo", "pod1", kapi.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod2", kapi.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod3", kapi.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod1", corev1.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod2", corev1.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod3", corev1.PodFailed, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 			expectedBlobDeletions: []string{
@@ -180,9 +177,9 @@ func TestImagePruning(t *testing.T) {
 			name:   "pod phase unknown - prune",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			pods: testutil.PodList(
-				testutil.Pod("foo", "pod1", kapi.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod2", kapi.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
-				testutil.Pod("foo", "pod3", kapi.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod1", corev1.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod2", corev1.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
+				testutil.Pod("foo", "pod3", corev1.PodUnknown, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 			expectedBlobDeletions: []string{
@@ -199,7 +196,7 @@ func TestImagePruning(t *testing.T) {
 			name:   "pod container image not parsable",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			pods: testutil.PodList(
-				testutil.Pod("foo", "pod1", kapi.PodRunning, "a/b/c/d/e"),
+				testutil.Pod("foo", "pod1", corev1.PodRunning, "a/b/c/d/e"),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 			expectedBlobDeletions: []string{
@@ -216,7 +213,7 @@ func TestImagePruning(t *testing.T) {
 			name:   "pod container image doesn't have an id",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			pods: testutil.PodList(
-				testutil.Pod("foo", "pod1", kapi.PodRunning, "foo/bar:latest"),
+				testutil.Pod("foo", "pod1", corev1.PodRunning, "foo/bar:latest"),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 			expectedBlobDeletions: []string{
@@ -233,7 +230,7 @@ func TestImagePruning(t *testing.T) {
 			name:   "pod refers to image not in graph",
 			images: testutil.ImageList(testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			pods: testutil.PodList(
-				testutil.Pod("foo", "pod1", kapi.PodRunning, registryHost+"/foo/bar@sha256:ABC0000000000000000000000000000000000000000000000000000000000002"),
+				testutil.Pod("foo", "pod1", corev1.PodRunning, registryHost+"/foo/bar@sha256:ABC0000000000000000000000000000000000000000000000000000000000002"),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 			expectedBlobDeletions: []string{
@@ -386,14 +383,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
 			expectedStreamUpdates:         []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
@@ -410,14 +407,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer1", "layer2"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			blobDeleterErrorGetter: func(dgst string) error {
 				if dgst == "layer1" {
@@ -446,14 +443,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer1", "layer2"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			blobDeleterErrorGetter:        func(dgst string) error { return errors.New("err") },
 			expectedImageDeletions:        []string{},
@@ -473,14 +470,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			manifestDeleterErr:            fmt.Errorf("err"),
 			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
@@ -499,14 +496,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			imageStreamDeleterErr: fmt.Errorf("err"),
 			expectedFailures:      []string{"foo/bar|err"},
@@ -519,14 +516,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
-				)),
+				}),
 			),
 		},
 
@@ -539,14 +536,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 			),
 			streams: testutil.StreamList(
-				testutil.AgedStream(registryHost, "foo", "bar", 5, testutil.Tags(
+				testutil.AgedStream(registryHost, "foo", "bar", 5, []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			expectedImageDeletions: []string{},
 			expectedStreamUpdates:  []string{},
@@ -558,12 +555,12 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 					),
-				)),
+				}),
 			),
 			expectedStreamUpdates: []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000000"},
 		},
@@ -574,14 +571,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", nil, "layer1"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 					),
 					testutil.Tag("tag",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
-				)),
+				}),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000001"},
 			expectedStreamUpdates: []string{
@@ -600,12 +597,12 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", nil),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.YoungTagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000", metav1.Now()),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 					),
-				)),
+				}),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000002"},
 			expectedBlobDeletions:  []string{registryURL + "|sha256:0000000000000000000000000000000000000000000000000000000000000002"},
@@ -623,7 +620,7 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000006", registryHost+"/foo/baz@sha256:0000000000000000000000000000000000000000000000000000000000000006"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
@@ -635,15 +632,15 @@ func TestImagePruning(t *testing.T) {
 					testutil.Tag("dummy", // removed because no object references the image (the nm/dcfoo has mismatched repository name)
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005"),
 					),
-				)),
-				testutil.Stream(registryHost, "foo", "baz", testutil.Tags(
+				}),
+				testutil.Stream(registryHost, "foo", "baz", []imagev1.NamedTagEventList{
 					testutil.Tag("late", // kept because replicaset references the tagged image
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
 					testutil.Tag("keepme", // kept because a deployment references the tagged image
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000006", registryHost+"/foo/baz@sha256:0000000000000000000000000000000000000000000000000000000000000006"),
 					),
-				)),
+				}),
 			),
 			dss: testutil.DSList(testutil.DS("nm", "dsfoo", fmt.Sprintf("%s/%s/%s:%s", registryHost, "foo", "bar", "latest"))),
 			dcs: testutil.DCList(testutil.DC("nm", "dcfoo", fmt.Sprintf("%s/%s/%s:%s", registryHost, "foo", "repo", "dummy"))),
@@ -685,15 +682,15 @@ func TestImagePruning(t *testing.T) {
 				testutil.Image("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
-				)),
+				}),
 			),
 			rcs:                    testutil.RCList(testutil.RC("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002")),
-			pods:                   testutil.PodList(testutil.Pod("foo", "pod1", kapi.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002")),
+			pods:                   testutil.PodList(testutil.Pod("foo", "pod1", corev1.PodRunning, registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002")),
 			dcs:                    testutil.DCList(testutil.DC("foo", "rc1", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			bcs:                    testutil.BCList(testutil.BC("foo", "bc1", "source", "DockerImage", "foo", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
 			builds:                 testutil.BuildList(testutil.Build("foo", "build1", "custom", "ImageStreamImage", "foo", "bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")),
@@ -846,14 +843,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer5", "layer6", "layer7", "layer8"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
 			expectedStreamUpdates:  []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
@@ -882,14 +879,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004", nil, "layer5", "layer6", "layer7", "layer8"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			layerDeleterErr:               fmt.Errorf("err"),
 			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004"},
@@ -926,14 +923,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005", &testutil.Config2, "layer5", "layer6", "layer9", "layerX"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004", "sha256:0000000000000000000000000000000000000000000000000000000000000005"},
 			expectedStreamUpdates:  []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000004"},
@@ -968,14 +965,14 @@ func TestImagePruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000005", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000005", &testutil.Config2, "layer5", "layer6", "layer9", "layerX"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
 			imageDeleterErr:        fmt.Errorf("err"),
 			expectedImageDeletions: []string{"sha256:0000000000000000000000000000000000000000000000000000000000000004", "sha256:0000000000000000000000000000000000000000000000000000000000000005"},
@@ -1018,15 +1015,15 @@ func TestImagePruning(t *testing.T) {
 				testutil.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 					),
-				)),
+				}),
 			),
-			limits: map[string][]*kapi.LimitRange{
+			limits: map[string][]*corev1.LimitRange{
 				"foo": testutil.LimitList(100, 200),
 			},
 			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000003"},
@@ -1045,20 +1042,20 @@ func TestImagePruning(t *testing.T) {
 				testutil.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/bar/foo@sha256:0000000000000000000000000000000000000000000000000000000000000004", 600, nil),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
-				)),
-				testutil.Stream(registryHost, "bar", "foo", testutil.Tags(
+				}),
+				testutil.Stream(registryHost, "bar", "foo", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/bar/foo@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", registryHost+"/bar/foo@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
 					),
-				)),
+				}),
 			),
-			limits: map[string][]*kapi.LimitRange{
+			limits: map[string][]*corev1.LimitRange{
 				"foo": testutil.LimitList(150),
 				"bar": testutil.LimitList(550),
 			},
@@ -1083,15 +1080,15 @@ func TestImagePruning(t *testing.T) {
 				testutil.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 					),
-				)),
+				}),
 			),
-			limits: map[string][]*kapi.LimitRange{
+			limits: map[string][]*corev1.LimitRange{
 				"foo": testutil.LimitList(300),
 			},
 			expectedImageDeletions: []string{},
@@ -1108,15 +1105,15 @@ func TestImagePruning(t *testing.T) {
 				testutil.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 					),
-				)),
+				}),
 			),
-			limits: map[string][]*kapi.LimitRange{
+			limits: map[string][]*corev1.LimitRange{
 				"foo": testutil.LimitList(100, 200),
 			},
 			expectedStreamUpdates: []string{"foo/bar|sha256:0000000000000000000000000000000000000000000000000000000000000003"},
@@ -1132,18 +1129,18 @@ func TestImagePruning(t *testing.T) {
 				testutil.SizedImage("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 200, nil),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream(registryHost, "foo", "bar", testutil.Tags(
+				testutil.Stream(registryHost, "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "otherregistry/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", registryHost+"/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 					),
-				)),
+				}),
 			),
 			builds: testutil.BuildList(
 				testutil.Build("foo", "build1", "source", "DockerImage", "foo", registryHost+"/foo/bar@sha256:many-zeros-and-3"),
 			),
-			limits: map[string][]*kapi.LimitRange{
+			limits: map[string][]*corev1.LimitRange{
 				"foo": testutil.LimitList(100, 200),
 			},
 			expectedImageDeletions:        []string{"sha256:0000000000000000000000000000000000000000000000000000000000000003"},
@@ -1177,6 +1174,9 @@ func TestImagePruning(t *testing.T) {
 		},
 	}
 
+	// we need to install OpenShift API types to kubectl's scheme for GetReference to work
+	api.Install(scheme.Scheme)
+
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			options := PrunerOptions{
@@ -1362,12 +1362,12 @@ func TestImageDeleter(t *testing.T) {
 	}
 
 	for name, test := range tests {
-		imageClient := fakeimageclient.Clientset{}
-		imageClient.AddReactor("delete", "images", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
+		imageClient := &fakeimagev1client.FakeImageV1{Fake: &clienttesting.Fake{}}
+		imageClient.AddReactor("delete", "images", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
 			return true, nil, test.imageDeletionError
 		})
-		imageDeleter := NewImageDeleter(imageClient.Image())
-		err := imageDeleter.DeleteImage(&imageapi.Image{ObjectMeta: metav1.ObjectMeta{Name: "sha256:0000000000000000000000000000000000000000000000000000000000000002"}})
+		imageDeleter := NewImageDeleter(imageClient)
+		err := imageDeleter.DeleteImage(&imagev1.Image{ObjectMeta: metav1.ObjectMeta{Name: "sha256:0000000000000000000000000000000000000000000000000000000000000002"}})
 		if test.imageDeletionError != nil {
 			if e, a := test.imageDeletionError, err; e != a {
 				t.Errorf("%s: err: expected %v, got %v", name, e, a)
@@ -1423,8 +1423,8 @@ func TestRegistryPruning(t *testing.T) {
 
 	tests := []struct {
 		name                       string
-		images                     imageapi.ImageList
-		streams                    imageapi.ImageStreamList
+		images                     imagev1.ImageList
+		streams                    imagev1.ImageStreamList
 		expectedLayerLinkDeletions sets.String
 		expectedBlobDeletions      sets.String
 		expectedManifestDeletions  sets.String
@@ -1439,17 +1439,17 @@ func TestRegistryPruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002", &testutil.Config2, "layer3", "layer4", "layer5", "layer6"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream("registry1.io", "foo", "bar", testutil.Tags(
+				testutil.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 					),
-				)),
-				testutil.Stream("registry1.io", "foo", "other", testutil.Tags(
+				}),
+				testutil.Stream("registry1.io", "foo", "other", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
-				)),
+				}),
 			),
 			expectedLayerLinkDeletions: sets.NewString(
 				"https://registry1.io|foo/bar|"+testutil.Config1,
@@ -1474,11 +1474,11 @@ func TestRegistryPruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001", &testutil.Config1, "layer1", "layer2", "layer3", "layer4"),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream("registry1.io", "foo", "bar", testutil.Tags(
+				testutil.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 					),
-				)),
+				}),
 			),
 			expectedLayerLinkDeletions: sets.NewString(),
 			expectedBlobDeletions:      sets.NewString(),
@@ -1517,18 +1517,18 @@ func TestRegistryPruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003", nil, "layer3", "layer4", "layer6", testutil.Config1),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream("registry1.io", "foo", "bar", testutil.Tags(
+				testutil.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 					),
-				)),
-				testutil.Stream("registry1.io", "foo", "other", testutil.Tags(
+				}),
+				testutil.Stream("registry1.io", "foo", "other", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
-				)),
+				}),
 			),
 			expectedLayerLinkDeletions: sets.NewString(
 				"https://registry1.io|foo/bar|layer1",
@@ -1553,18 +1553,18 @@ func TestRegistryPruning(t *testing.T) {
 				testutil.ImageWithLayers("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003", nil, "layer3", "layer4", "layer6", testutil.Config1),
 			),
 			streams: testutil.StreamList(
-				testutil.Stream("registry1.io", "foo", "bar", testutil.Tags(
+				testutil.Stream("registry1.io", "foo", "bar", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 					),
-				)),
-				testutil.Stream("registry1.io", "foo", "other", testutil.Tags(
+				}),
+				testutil.Stream("registry1.io", "foo", "other", []imagev1.NamedTagEventList{
 					testutil.Tag("latest",
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/other@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 						testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
 					),
-				)),
+				}),
 			),
 			expectedLayerLinkDeletions: sets.NewString(),
 			expectedBlobDeletions:      sets.NewString(),
@@ -1584,14 +1584,14 @@ func TestRegistryPruning(t *testing.T) {
 				ImageWatcher:     watch.NewFake(),
 				Streams:          &test.streams,
 				StreamWatcher:    watch.NewFake(),
-				Pods:             &kapi.PodList{},
-				RCs:              &kapi.ReplicationControllerList{},
-				BCs:              &buildapi.BuildConfigList{},
-				Builds:           &buildapi.BuildList{},
-				DSs:              &kapisext.DaemonSetList{},
-				Deployments:      &kapisext.DeploymentList{},
+				Pods:             &corev1.PodList{},
+				RCs:              &corev1.ReplicationControllerList{},
+				BCs:              &buildv1.BuildConfigList{},
+				Builds:           &buildv1.BuildList{},
+				DSs:              &kappsv1.DaemonSetList{},
+				Deployments:      &kappsv1.DeploymentList{},
 				DCs:              &appsv1.DeploymentConfigList{},
-				RSs:              &kapisext.ReplicaSetList{},
+				RSs:              &kappsv1.ReplicaSetList{},
 				RegistryClientFactory: FakeRegistryClientFactory,
 				RegistryURL:           &url.URL{Scheme: "https", Host: "registry1.io"},
 			}
@@ -1636,7 +1636,7 @@ func TestImageWithStrongAndWeakRefsIsNotPruned(t *testing.T) {
 		testutil.AgedImage("0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 1540),
 	)
 	streams := testutil.StreamList(
-		testutil.Stream("registry1", "foo", "bar", testutil.Tags(
+		testutil.Stream("registry1", "foo", "bar", []imagev1.NamedTagEventList{
 			testutil.Tag("latest",
 				testutil.TagEvent("0000000000000000000000000000000000000000000000000000000000000003", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003"),
 				testutil.TagEvent("0000000000000000000000000000000000000000000000000000000000000002", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
@@ -1645,7 +1645,7 @@ func TestImageWithStrongAndWeakRefsIsNotPruned(t *testing.T) {
 			testutil.Tag("strong",
 				testutil.TagEvent("0000000000000000000000000000000000000000000000000000000000000001", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000001"),
 			),
-		)),
+		}),
 	)
 	pods := testutil.PodList()
 	rcs := testutil.RCList()
@@ -1713,8 +1713,8 @@ func TestImageWithStrongAndWeakRefsIsNotPruned(t *testing.T) {
 
 func TestImageIsPrunable(t *testing.T) {
 	g := genericgraph.New()
-	imageNode := imagegraph.EnsureImageNode(g, &imageapi.Image{ObjectMeta: metav1.ObjectMeta{Name: "myImage"}})
-	streamNode := imagegraph.EnsureImageStreamNode(g, &imageapi.ImageStream{ObjectMeta: metav1.ObjectMeta{Name: "myStream"}})
+	imageNode := imagegraph.EnsureImageNode(g, &imagev1.Image{ObjectMeta: metav1.ObjectMeta{Name: "myImage"}})
+	streamNode := imagegraph.EnsureImageStreamNode(g, &imagev1.ImageStream{ObjectMeta: metav1.ObjectMeta{Name: "myStream"}})
 	g.AddEdge(streamNode, imageNode, ReferencedImageEdgeKind)
 	g.AddEdge(streamNode, imageNode, WeakReferencedImageEdgeKind)
 
@@ -1748,18 +1748,18 @@ func TestPrunerGetNextJob(t *testing.T) {
 
 	is := images.Items
 	imageStreams := testutil.StreamList(
-		testutil.Stream("example.com", "foo", "bar", testutil.Tags(
+		testutil.Stream("example.com", "foo", "bar", []imagev1.NamedTagEventList{
 			testutil.Tag("latest",
 				testutil.TagEvent(is[3].Name, is[3].DockerImageReference),
 				testutil.TagEvent(is[4].Name, is[4].DockerImageReference),
-				testutil.TagEvent(is[5].Name, is[5].DockerImageReference)))),
-		testutil.Stream("example.com", "foo", "baz", testutil.Tags(
+				testutil.TagEvent(is[5].Name, is[5].DockerImageReference))}),
+		testutil.Stream("example.com", "foo", "baz", []imagev1.NamedTagEventList{
 			testutil.Tag("devel",
 				testutil.TagEvent(is[3].Name, is[3].DockerImageReference),
 				testutil.TagEvent(is[2].Name, is[2].DockerImageReference),
 				testutil.TagEvent(is[1].Name, is[1].DockerImageReference)),
 			testutil.Tag("prod",
-				testutil.TagEvent(is[2].Name, is[2].DockerImageReference)))))
+				testutil.TagEvent(is[2].Name, is[2].DockerImageReference))}))
 	if err := p.addImageStreamsToGraph(&imageStreams, nil); err != nil {
 		t.Fatalf("failed to add image streams: %v", err)
 	}
@@ -1772,7 +1772,7 @@ func TestPrunerGetNextJob(t *testing.T) {
 	sort.Sort(byLayerCountAndAge(prunable))
 	p.queue = makeQueue(prunable)
 
-	checkQueue := func(desc string, expected ...*imageapi.Image) {
+	checkQueue := func(desc string, expected ...*imagev1.Image) {
 		for i, item := 0, p.queue; i < len(expected) || item != nil; i++ {
 			if i >= len(expected) {
 				t.Errorf("[%s] unexpected image at #%d: %s", desc, i, item.node.Image.Name)
@@ -1869,7 +1869,7 @@ func expectBlockedOrJob(
 	p *pruner,
 	desc string,
 	blocked bool,
-	image *imageapi.Image,
+	image *imagev1.Image,
 	layers []string,
 ) func(job *Job, blocked bool) *Job {
 	return func(job *Job, b bool) *Job {
@@ -1931,7 +1931,7 @@ func TestChangeImageStreamsWhilePruning(t *testing.T) {
 		testutil.AgedImage("sha256:0000000000000000000000000000000000000000000000000000000000000005", "registry1.io/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000003", 1),
 	)
 
-	streams := testutil.StreamList(testutil.Stream("registry1", "foo", "bar", testutil.Tags()))
+	streams := testutil.StreamList(testutil.Stream("registry1", "foo", "bar", []imagev1.NamedTagEventList{}))
 	streamWatcher := watch.NewFake()
 	pods := testutil.PodList()
 	rcs := testutil.RCList()
@@ -1998,10 +1998,10 @@ func TestChangeImageStreamsWhilePruning(t *testing.T) {
 	expectedBlobDeletions.Insert("registry1|" + images.Items[0].Name)
 
 	// let the pruner wait for reply and meanwhile reference an image with a new image stream
-	stream := testutil.Stream("registry1", "foo", "new", testutil.Tags(
+	stream := testutil.Stream("registry1", "foo", "new", []imagev1.NamedTagEventList{
 		testutil.Tag("latest",
 			testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000002", "registry1/foo/new@sha256:0000000000000000000000000000000000000000000000000000000000000002"),
-		)))
+		)})
 	streamWatcher.Add(&stream)
 	imageDeleter.unblock()
 
@@ -2014,11 +2014,11 @@ func TestChangeImageStreamsWhilePruning(t *testing.T) {
 	expectedBlobDeletions.Insert("registry1|" + images.Items[2].Name)
 
 	// now lets modify the existing image stream to reference some more images
-	stream = testutil.Stream("registry1", "foo", "bar", testutil.Tags(
+	stream = testutil.Stream("registry1", "foo", "bar", []imagev1.NamedTagEventList{
 		testutil.Tag("latest",
 			testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000000", "registry1/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000"),
 			testutil.TagEvent("sha256:0000000000000000000000000000000000000000000000000000000000000004", "registry1/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000004"),
-		)))
+		)})
 	streamWatcher.Modify(&stream)
 	imageDeleter.unblock()
 
@@ -2061,13 +2061,13 @@ func TestChangeImageStreamsWhilePruning(t *testing.T) {
 	}
 }
 
-func streamListToClient(list *imageapi.ImageStreamList) imageclient.ImageStreamsGetter {
+func streamListToClient(list *imagev1.ImageStreamList) imagev1client.ImageStreamsGetter {
 	streams := make([]runtime.Object, 0, len(list.Items))
 	for i := range list.Items {
 		streams = append(streams, &list.Items[i])
 	}
 
-	return fakeimageclient.NewSimpleClientset(streams...).Image()
+	return &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset(streams...).Fake)}
 }
 
 func keepTagRevisions(n int) *int {
@@ -2082,7 +2082,7 @@ type fakeImageDeleter struct {
 
 var _ ImageDeleter = &fakeImageDeleter{}
 
-func (p *fakeImageDeleter) DeleteImage(image *imageapi.Image) error {
+func (p *fakeImageDeleter) DeleteImage(image *imagev1.Image) error {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	p.invocations.Insert(image.Name)
@@ -2102,11 +2102,11 @@ func newFakeImageDeleter(err error) (*fakeImageDeleter, ImagePrunerFactoryFunc)
 type blockingImageDeleter struct {
 	t        *testing.T
 	d        *fakeImageDeleter
-	requests chan *imageapi.Image
+	requests chan *imagev1.Image
 	reply    chan struct{}
 }
 
-func (bid *blockingImageDeleter) DeleteImage(img *imageapi.Image) error {
+func (bid *blockingImageDeleter) DeleteImage(img *imagev1.Image) error {
 	bid.requests <- img
 	select {
 	case <-bid.reply:
@@ -2116,7 +2116,7 @@ func (bid *blockingImageDeleter) DeleteImage(img *imageapi.Image) error {
 	return bid.d.DeleteImage(img)
 }
 
-func (bid *blockingImageDeleter) waitForRequest() *imageapi.Image {
+func (bid *blockingImageDeleter) waitForRequest() *imagev1.Image {
 	select {
 	case img := <-bid.requests:
 		return img
@@ -2135,7 +2135,7 @@ func newBlockingImageDeleter(t *testing.T) (*blockingImageDeleter, ImagePrunerFa
 	blocking := blockingImageDeleter{
 		t:        t,
 		d:        deleter,
-		requests: make(chan *imageapi.Image),
+		requests: make(chan *imagev1.Image),
 		reply:    make(chan struct{}),
 	}
 	return &blocking, func() (ImageDeleter, error) {
@@ -2153,7 +2153,7 @@ type fakeImageStreamDeleter struct {
 
 var _ ImageStreamDeleter = &fakeImageStreamDeleter{}
 
-func (p *fakeImageStreamDeleter) GetImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) {
+func (p *fakeImageStreamDeleter) GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	if p.streamImages == nil {
@@ -2162,24 +2162,24 @@ func (p *fakeImageStreamDeleter) GetImageStream(stream *imageapi.ImageStream) (*
 	if p.streamTags == nil {
 		p.streamTags = make(map[string][]string)
 	}
-	for tag, history := range stream.Status.Tags {
+	for _, tag := range stream.Status.Tags {
 		streamName := fmt.Sprintf("%s/%s", stream.Namespace, stream.Name)
-		p.streamTags[streamName] = append(p.streamTags[streamName], tag)
+		p.streamTags[streamName] = append(p.streamTags[streamName], tag.Tag)
 
-		for _, tagEvent := range history.Items {
+		for _, tagEvent := range tag.Items {
 			p.streamImages[streamName] = append(p.streamImages[streamName], tagEvent.Image)
 		}
 	}
 	return stream, p.err
 }
 
-func (p *fakeImageStreamDeleter) UpdateImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) {
+func (p *fakeImageStreamDeleter) UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
 	streamImages := make(map[string]struct{})
 	streamTags := make(map[string]struct{})
 
-	for tag, history := range stream.Status.Tags {
-		streamTags[tag] = struct{}{}
-		for _, tagEvent := range history.Items {
+	for _, tag := range stream.Status.Tags {
+		streamTags[tag.Tag] = struct{}{}
+		for _, tagEvent := range tag.Items {
 			streamImages[tagEvent.Image] = struct{}{}
 		}
 	}
@@ -2201,7 +2201,7 @@ func (p *fakeImageStreamDeleter) UpdateImageStream(stream *imageapi.ImageStream)
 	return stream, p.err
 }
 
-func (p *fakeImageStreamDeleter) NotifyImageStreamPrune(stream *imageapi.ImageStream, updatedTags []string, deletedTags []string) {
+func (p *fakeImageStreamDeleter) NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string) {
 	return
 }
 
diff --git a/pkg/oc/cli/admin/prune/imageprune/testutil/util.go b/pkg/oc/cli/admin/prune/imageprune/testutil/util.go
index 6cd8176dbaca..0a7106495ad8 100644
--- a/pkg/oc/cli/admin/prune/imageprune/testutil/util.go
+++ b/pkg/oc/cli/admin/prune/imageprune/testutil/util.go
@@ -7,14 +7,16 @@ import (
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/manifest/schema2"
 
+	kappsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	kapisext "k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/apimachinery/pkg/runtime"
 
 	appsv1 "github.com/openshift/api/apps/v1"
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
+	buildv1 "github.com/openshift/api/build/v1"
+	dockerv10 "github.com/openshift/api/image/docker10"
+	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 )
 
@@ -32,19 +34,19 @@ var (
 )
 
 // ImageList turns the given images into ImageList.
-func ImageList(images ...imageapi.Image) imageapi.ImageList {
-	return imageapi.ImageList{
+func ImageList(images ...imagev1.Image) imagev1.ImageList {
+	return imagev1.ImageList{
 		Items: images,
 	}
 }
 
 // AgedImage creates a test image with specified age.
-func AgedImage(id, ref string, ageInMinutes int64, layers ...string) imageapi.Image {
+func AgedImage(id, ref string, ageInMinutes int64, layers ...string) imagev1.Image {
 	return CreatedImage(id, ref, time.Now().Add(time.Duration(ageInMinutes)*time.Minute*-1), layers...)
 }
 
 // CreatedImage creates a test image with the CreationTime set to the given timestamp.
-func CreatedImage(id, ref string, created time.Time, layers ...string) imageapi.Image {
+func CreatedImage(id, ref string, created time.Time, layers ...string) imagev1.Image {
 	if len(layers) == 0 {
 		layers = []string{Layer1, Layer2, Layer3, Layer4, Layer5}
 	}
@@ -54,22 +56,26 @@ func CreatedImage(id, ref string, created time.Time, layers ...string) imageapi.
 }
 
 // SizedImage returns a test image of given size.
-func SizedImage(id, ref string, size int64, configName *string) imageapi.Image {
+func SizedImage(id, ref string, size int64, configName *string) imagev1.Image {
 	image := ImageWithLayers(id, ref, configName, Layer1, Layer2, Layer3, Layer4, Layer5)
 	image.CreationTimestamp = metav1.NewTime(metav1.Now().Add(time.Duration(-1) * time.Minute))
-	image.DockerImageMetadata.Size = size
+	dockerImageMetadata, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
+	if !ok {
+		panic("Failed casting DockerImageMetadata")
+	}
+	dockerImageMetadata.Size = size
 
 	return image
 }
 
 // Image returns a default test image object 120 minutes old.
-func Image(id, ref string) imageapi.Image {
+func Image(id, ref string) imagev1.Image {
 	return AgedImage(id, ref, 120)
 }
 
 // Image returns a default test image referencing the given layers.
-func ImageWithLayers(id, ref string, configName *string, layers ...string) imageapi.Image {
-	image := imageapi.Image{
+func ImageWithLayers(id, ref string, configName *string, layers ...string) imagev1.Image {
+	image := imagev1.Image{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: id,
 			Annotations: map[string]string{
@@ -80,24 +86,29 @@ func ImageWithLayers(id, ref string, configName *string, layers ...string) image
 		DockerImageManifestMediaType: schema1.MediaTypeManifest,
 	}
 
+	image.DockerImageMetadata = runtime.RawExtension{
+		Object: &dockerv10.DockerImage{},
+	}
 	if configName != nil {
-		image.DockerImageMetadata = imageapi.DockerImage{
-			ID: *configName,
+		image.DockerImageMetadata = runtime.RawExtension{
+			Object: &dockerv10.DockerImage{
+				ID: *configName,
+			},
 		}
 		image.DockerImageConfig = fmt.Sprintf("{Digest: %s}", *configName)
 		image.DockerImageManifestMediaType = schema2.MediaTypeManifest
 	}
 
-	image.DockerImageLayers = []imageapi.ImageLayer{}
+	image.DockerImageLayers = []imagev1.ImageLayer{}
 	for _, layer := range layers {
-		image.DockerImageLayers = append(image.DockerImageLayers, imageapi.ImageLayer{Name: layer})
+		image.DockerImageLayers = append(image.DockerImageLayers, imagev1.ImageLayer{Name: layer})
 	}
 
 	return image
 }
 
 // UnmanagedImage creates a test image object lacking managed by OpenShift annotation.
-func UnmanagedImage(id, ref string, hasAnnotations bool, annotation, value string) imageapi.Image {
+func UnmanagedImage(id, ref string, hasAnnotations bool, annotation, value string) imagev1.Image {
 	image := ImageWithLayers(id, ref, nil)
 	if !hasAnnotations {
 		image.Annotations = nil
@@ -109,27 +120,27 @@ func UnmanagedImage(id, ref string, hasAnnotations bool, annotation, value strin
 }
 
 // PodList turns the given pods into PodList.
-func PodList(pods ...kapi.Pod) kapi.PodList {
-	return kapi.PodList{
+func PodList(pods ...corev1.Pod) corev1.PodList {
+	return corev1.PodList{
 		Items: pods,
 	}
 }
 
 // Pod creates and returns a pod having the given docker image references.
-func Pod(namespace, name string, phase kapi.PodPhase, containerImages ...string) kapi.Pod {
+func Pod(namespace, name string, phase corev1.PodPhase, containerImages ...string) corev1.Pod {
 	return AgedPod(namespace, name, phase, -1, containerImages...)
 }
 
 // AgedPod creates and returns a pod of particular age.
-func AgedPod(namespace, name string, phase kapi.PodPhase, ageInMinutes int64, containerImages ...string) kapi.Pod {
-	pod := kapi.Pod{
+func AgedPod(namespace, name string, phase corev1.PodPhase, ageInMinutes int64, containerImages ...string) corev1.Pod {
+	pod := corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/pod/" + name,
+			SelfLink:  "/api/v1/pods/" + name,
 		},
 		Spec: PodSpecInternal(containerImages...),
-		Status: kapi.PodStatus{
+		Status: corev1.PodStatus{
 			Phase: phase,
 		},
 	}
@@ -142,12 +153,12 @@ func AgedPod(namespace, name string, phase kapi.PodPhase, ageInMinutes int64, co
 }
 
 // PodSpecInternal creates a pod specification having the given docker image references.
-func PodSpecInternal(containerImages ...string) kapi.PodSpec {
-	spec := kapi.PodSpec{
-		Containers: []kapi.Container{},
+func PodSpecInternal(containerImages ...string) corev1.PodSpec {
+	spec := corev1.PodSpec{
+		Containers: []corev1.Container{},
 	}
 	for _, image := range containerImages {
-		container := kapi.Container{
+		container := corev1.Container{
 			Image: image,
 		}
 		spec.Containers = append(spec.Containers, container)
@@ -170,25 +181,25 @@ func PodSpec(containerImages ...string) corev1.PodSpec {
 }
 
 // StreamList turns the given streams into StreamList.
-func StreamList(streams ...imageapi.ImageStream) imageapi.ImageStreamList {
-	return imageapi.ImageStreamList{
+func StreamList(streams ...imagev1.ImageStream) imagev1.ImageStreamList {
+	return imagev1.ImageStreamList{
 		Items: streams,
 	}
 }
 
 // Stream creates and returns a test ImageStream object 1 minute old
-func Stream(registry, namespace, name string, tags map[string]imageapi.TagEventList) imageapi.ImageStream {
+func Stream(registry, namespace, name string, tags []imagev1.NamedTagEventList) imagev1.ImageStream {
 	return AgedStream(registry, namespace, name, -1, tags)
 }
 
 // Stream creates and returns a test ImageStream object of given age.
-func AgedStream(registry, namespace, name string, ageInMinutes int64, tags map[string]imageapi.TagEventList) imageapi.ImageStream {
-	stream := imageapi.ImageStream{
+func AgedStream(registry, namespace, name string, ageInMinutes int64, tags []imagev1.NamedTagEventList) imagev1.ImageStream {
+	stream := imagev1.ImageStream{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
 		},
-		Status: imageapi.ImageStreamStatus{
+		Status: imagev1.ImageStreamStatus{
 			DockerImageRepository: fmt.Sprintf("%s/%s/%s", registry, namespace, name),
 			Tags: tags,
 		},
@@ -202,46 +213,30 @@ func AgedStream(registry, namespace, name string, ageInMinutes int64, tags map[s
 }
 
 // Stream creates an ImageStream object and returns a pointer to it.
-func StreamPtr(registry, namespace, name string, tags map[string]imageapi.TagEventList) *imageapi.ImageStream {
+func StreamPtr(registry, namespace, name string, tags []imagev1.NamedTagEventList) *imagev1.ImageStream {
 	s := Stream(registry, namespace, name, tags)
 	return &s
 }
 
-// Tags creates a map of tags for image stream status.
-func Tags(list ...namedTagEventList) map[string]imageapi.TagEventList {
-	m := make(map[string]imageapi.TagEventList, len(list))
-	for _, tag := range list {
-		m[tag.name] = tag.events
-	}
-	return m
-}
-
-type namedTagEventList struct {
-	name   string
-	events imageapi.TagEventList
-}
-
 // Tag creates tag entries for Tags function.
-func Tag(name string, events ...imageapi.TagEvent) namedTagEventList {
-	return namedTagEventList{
-		name: name,
-		events: imageapi.TagEventList{
-			Items: events,
-		},
+func Tag(name string, events ...imagev1.TagEvent) imagev1.NamedTagEventList {
+	return imagev1.NamedTagEventList{
+		Tag:   name,
+		Items: events,
 	}
 }
 
 // TagEvent creates a TagEvent object.
-func TagEvent(id, ref string) imageapi.TagEvent {
-	return imageapi.TagEvent{
+func TagEvent(id, ref string) imagev1.TagEvent {
+	return imagev1.TagEvent{
 		Image:                id,
 		DockerImageReference: ref,
 	}
 }
 
 // YoungTagEvent creates a TagEvent with the given created timestamp.
-func YoungTagEvent(id, ref string, created metav1.Time) imageapi.TagEvent {
-	return imageapi.TagEvent{
+func YoungTagEvent(id, ref string, created metav1.Time) imagev1.TagEvent {
+	return imagev1.TagEvent{
 		Image:                id,
 		Created:              created,
 		DockerImageReference: ref,
@@ -249,22 +244,22 @@ func YoungTagEvent(id, ref string, created metav1.Time) imageapi.TagEvent {
 }
 
 // RCList turns the given replication controllers into RCList.
-func RCList(rcs ...kapi.ReplicationController) kapi.ReplicationControllerList {
-	return kapi.ReplicationControllerList{
+func RCList(rcs ...corev1.ReplicationController) corev1.ReplicationControllerList {
+	return corev1.ReplicationControllerList{
 		Items: rcs,
 	}
 }
 
 // RC creates and returns a ReplicationController.
-func RC(namespace, name string, containerImages ...string) kapi.ReplicationController {
-	return kapi.ReplicationController{
+func RC(namespace, name string, containerImages ...string) corev1.ReplicationController {
+	return corev1.ReplicationController{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/rc/" + name,
+			SelfLink:  "/api/v1/replicationcontrollers/" + name,
 		},
-		Spec: kapi.ReplicationControllerSpec{
-			Template: &kapi.PodTemplateSpec{
+		Spec: corev1.ReplicationControllerSpec{
+			Template: &corev1.PodTemplateSpec{
 				Spec: PodSpecInternal(containerImages...),
 			},
 		},
@@ -272,22 +267,22 @@ func RC(namespace, name string, containerImages ...string) kapi.ReplicationContr
 }
 
 // DSList turns the given daemon sets into DaemonSetList.
-func DSList(dss ...kapisext.DaemonSet) kapisext.DaemonSetList {
-	return kapisext.DaemonSetList{
+func DSList(dss ...kappsv1.DaemonSet) kappsv1.DaemonSetList {
+	return kappsv1.DaemonSetList{
 		Items: dss,
 	}
 }
 
 // DS creates and returns a DaemonSet object.
-func DS(namespace, name string, containerImages ...string) kapisext.DaemonSet {
-	return kapisext.DaemonSet{
+func DS(namespace, name string, containerImages ...string) kappsv1.DaemonSet {
+	return kappsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/ds/" + name,
+			SelfLink:  "/apis/apps/v1/daemonsets/" + name,
 		},
-		Spec: kapisext.DaemonSetSpec{
-			Template: kapi.PodTemplateSpec{
+		Spec: kappsv1.DaemonSetSpec{
+			Template: corev1.PodTemplateSpec{
 				Spec: PodSpecInternal(containerImages...),
 			},
 		},
@@ -295,22 +290,22 @@ func DS(namespace, name string, containerImages ...string) kapisext.DaemonSet {
 }
 
 // DeploymentList turns the given deployments into DeploymentList.
-func DeploymentList(deployments ...kapisext.Deployment) kapisext.DeploymentList {
-	return kapisext.DeploymentList{
+func DeploymentList(deployments ...kappsv1.Deployment) kappsv1.DeploymentList {
+	return kappsv1.DeploymentList{
 		Items: deployments,
 	}
 }
 
 // Deployment creates and returns aDeployment object.
-func Deployment(namespace, name string, containerImages ...string) kapisext.Deployment {
-	return kapisext.Deployment{
+func Deployment(namespace, name string, containerImages ...string) kappsv1.Deployment {
+	return kappsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/deployment/" + name,
+			SelfLink:  "/apis/apps/v1/deployments/" + name,
 		},
-		Spec: kapisext.DeploymentSpec{
-			Template: kapi.PodTemplateSpec{
+		Spec: kappsv1.DeploymentSpec{
+			Template: corev1.PodTemplateSpec{
 				Spec: PodSpecInternal(containerImages...),
 			},
 		},
@@ -330,7 +325,7 @@ func DC(namespace, name string, containerImages ...string) appsv1.DeploymentConf
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/dc/" + name,
+			SelfLink:  "/apis/apps.openshift.io/v1/deploymentconfigs/" + name,
 		},
 		Spec: appsv1.DeploymentConfigSpec{
 			Template: &corev1.PodTemplateSpec{
@@ -341,22 +336,22 @@ func DC(namespace, name string, containerImages ...string) appsv1.DeploymentConf
 }
 
 // RSList turns the given replica set into ReplicaSetList.
-func RSList(rss ...kapisext.ReplicaSet) kapisext.ReplicaSetList {
-	return kapisext.ReplicaSetList{
+func RSList(rss ...kappsv1.ReplicaSet) kappsv1.ReplicaSetList {
+	return kappsv1.ReplicaSetList{
 		Items: rss,
 	}
 }
 
 // RS creates and returns a ReplicaSet object.
-func RS(namespace, name string, containerImages ...string) kapisext.ReplicaSet {
-	return kapisext.ReplicaSet{
+func RS(namespace, name string, containerImages ...string) kappsv1.ReplicaSet {
+	return kappsv1.ReplicaSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/rs/" + name,
+			SelfLink:  "/apis/apps/v1/replicasets/" + name,
 		},
-		Spec: kapisext.ReplicaSetSpec{
-			Template: kapi.PodTemplateSpec{
+		Spec: kappsv1.ReplicaSetSpec{
+			Template: corev1.PodTemplateSpec{
 				Spec: PodSpecInternal(containerImages...),
 			},
 		},
@@ -364,59 +359,59 @@ func RS(namespace, name string, containerImages ...string) kapisext.ReplicaSet {
 }
 
 // BCList turns the given build configs into BuildConfigList.
-func BCList(bcs ...buildapi.BuildConfig) buildapi.BuildConfigList {
-	return buildapi.BuildConfigList{
+func BCList(bcs ...buildv1.BuildConfig) buildv1.BuildConfigList {
+	return buildv1.BuildConfigList{
 		Items: bcs,
 	}
 }
 
 // BC creates and returns a BuildConfig object.
-func BC(namespace, name, strategyType, fromKind, fromNamespace, fromName string) buildapi.BuildConfig {
-	return buildapi.BuildConfig{
+func BC(namespace, name, strategyType, fromKind, fromNamespace, fromName string) buildv1.BuildConfig {
+	return buildv1.BuildConfig{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/bc/" + name,
+			SelfLink:  "/apis/build.openshift.io/v1/buildconfigs/" + name,
 		},
-		Spec: buildapi.BuildConfigSpec{
+		Spec: buildv1.BuildConfigSpec{
 			CommonSpec: CommonSpec(strategyType, fromKind, fromNamespace, fromName),
 		},
 	}
 }
 
 // BuildList turns the given builds into BuildList.
-func BuildList(builds ...buildapi.Build) buildapi.BuildList {
-	return buildapi.BuildList{
+func BuildList(builds ...buildv1.Build) buildv1.BuildList {
+	return buildv1.BuildList{
 		Items: builds,
 	}
 }
 
 // Build creates and returns a Build object.
-func Build(namespace, name, strategyType, fromKind, fromNamespace, fromName string) buildapi.Build {
-	return buildapi.Build{
+func Build(namespace, name, strategyType, fromKind, fromNamespace, fromName string) buildv1.Build {
+	return buildv1.Build{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
-			SelfLink:  "/build/" + name,
+			SelfLink:  "/apis/build.openshift.io/v1/builds/" + name,
 		},
-		Spec: buildapi.BuildSpec{
+		Spec: buildv1.BuildSpec{
 			CommonSpec: CommonSpec(strategyType, fromKind, fromNamespace, fromName),
 		},
 	}
 }
 
 // LimitList turns the given limits into LimitRanges.
-func LimitList(limits ...int64) []*kapi.LimitRange {
-	list := make([]*kapi.LimitRange, len(limits))
+func LimitList(limits ...int64) []*corev1.LimitRange {
+	list := make([]*corev1.LimitRange, 0, len(limits))
 	for _, limit := range limits {
 		quantity := resource.NewQuantity(limit, resource.BinarySI)
-		list = append(list, &kapi.LimitRange{
-			Spec: kapi.LimitRangeSpec{
-				Limits: []kapi.LimitRangeItem{
+		list = append(list, &corev1.LimitRange{
+			Spec: corev1.LimitRangeSpec{
+				Limits: []corev1.LimitRangeItem{
 					{
-						Type: imageapi.LimitTypeImage,
-						Max: kapi.ResourceList{
-							kapi.ResourceStorage: *quantity,
+						Type: imagev1.LimitTypeImage,
+						Max: corev1.ResourceList{
+							corev1.ResourceStorage: *quantity,
 						},
 					},
 				},
@@ -427,30 +422,30 @@ func LimitList(limits ...int64) []*kapi.LimitRange {
 }
 
 // CommonSpec creates and returns CommonSpec object.
-func CommonSpec(strategyType, fromKind, fromNamespace, fromName string) buildapi.CommonSpec {
-	spec := buildapi.CommonSpec{
-		Strategy: buildapi.BuildStrategy{},
+func CommonSpec(strategyType, fromKind, fromNamespace, fromName string) buildv1.CommonSpec {
+	spec := buildv1.CommonSpec{
+		Strategy: buildv1.BuildStrategy{},
 	}
 	switch strategyType {
 	case "source":
-		spec.Strategy.SourceStrategy = &buildapi.SourceBuildStrategy{
-			From: kapi.ObjectReference{
+		spec.Strategy.SourceStrategy = &buildv1.SourceBuildStrategy{
+			From: corev1.ObjectReference{
 				Kind:      fromKind,
 				Namespace: fromNamespace,
 				Name:      fromName,
 			},
 		}
 	case "docker":
-		spec.Strategy.DockerStrategy = &buildapi.DockerBuildStrategy{
-			From: &kapi.ObjectReference{
+		spec.Strategy.DockerStrategy = &buildv1.DockerBuildStrategy{
+			From: &corev1.ObjectReference{
 				Kind:      fromKind,
 				Namespace: fromNamespace,
 				Name:      fromName,
 			},
 		}
 	case "custom":
-		spec.Strategy.CustomStrategy = &buildapi.CustomBuildStrategy{
-			From: kapi.ObjectReference{
+		spec.Strategy.CustomStrategy = &buildv1.CustomBuildStrategy{
+			From: corev1.ObjectReference{
 				Kind:      fromKind,
 				Namespace: fromNamespace,
 				Name:      fromName,
diff --git a/pkg/oc/cli/admin/prune/images/images.go b/pkg/oc/cli/admin/prune/images/images.go
index 9f189e884499..c13362586c14 100644
--- a/pkg/oc/cli/admin/prune/images/images.go
+++ b/pkg/oc/cli/admin/prune/images/images.go
@@ -19,6 +19,7 @@ import (
 	gonum "github.com/gonum/graph"
 	"github.com/spf13/cobra"
 
+	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -27,19 +28,19 @@ import (
 	apimachineryversion "k8s.io/apimachinery/pkg/version"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	kclientcmd "k8s.io/client-go/tools/clientcmd"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
 
-	appsclient "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
-	buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset/typed/build/internalversion"
+	imagev1 "github.com/openshift/api/image/v1"
+	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
+	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
 	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	imageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/typed/image/internalversion"
 	"github.com/openshift/origin/pkg/oc/cli/admin/prune/imageprune"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
 	oserrors "github.com/openshift/origin/pkg/util/errors"
@@ -128,12 +129,12 @@ type PruneImagesOptions struct {
 	IgnoreInvalidRefs   bool
 
 	ClientConfig       *restclient.Config
-	AppsClient         appsclient.DeploymentConfigsGetter
-	BuildClient        buildclient.BuildInterface
-	ImageClient        imageclient.ImageInterface
-	ImageClientFactory func() (imageclient.ImageInterface, error)
+	AppsClient         appsv1client.AppsV1Interface
+	BuildClient        buildv1client.BuildV1Interface
+	ImageClient        imagev1client.ImageV1Interface
+	ImageClientFactory func() (imagev1client.ImageV1Interface, error)
 	DiscoveryClient    discovery.DiscoveryInterface
-	KubeClient         kclientset.Interface
+	KubeClient         kubernetes.Interface
 	Timeout            time.Duration
 	Out                io.Writer
 	ErrOut             io.Writer
@@ -208,23 +209,38 @@ func (o *PruneImagesOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, ar
 	o.Out = out
 	o.ErrOut = os.Stderr
 
-	clientConfig, err := f.ToRESTConfig()
+	var err error
+	o.ClientConfig, err = f.ToRESTConfig()
 	if err != nil {
 		return err
 	}
-	o.ClientConfig = clientConfig
-	appsClient, buildClient, imageClient, kubeClient, err := getClients(f)
+	if len(o.ClientConfig.BearerToken) == 0 {
+		return errNoToken
+	}
+	o.KubeClient, err = kubernetes.NewForConfig(o.ClientConfig)
+	if err != nil {
+		return err
+	}
+	o.AppsClient, err = appsv1client.NewForConfig(o.ClientConfig)
 	if err != nil {
 		return err
 	}
-	o.AppsClient = appsClient
-	o.BuildClient = buildClient
-	o.ImageClient = imageClient
+	o.BuildClient, err = buildv1client.NewForConfig(o.ClientConfig)
+	if err != nil {
+		return err
+	}
+	o.ImageClient, err = imagev1client.NewForConfig(o.ClientConfig)
+	if err != nil {
+		return err
+	}
+	o.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(o.ClientConfig)
+	if err != nil {
+		return err
+	}
+
 	o.ImageClientFactory = getImageClientFactory(f)
-	o.KubeClient = kubeClient
-	o.DiscoveryClient = kubeClient.Discovery()
 
-	o.Timeout = clientConfig.Timeout
+	o.Timeout = o.ClientConfig.Timeout
 	if o.Timeout == 0 {
 		o.Timeout = time.Duration(10 * time.Second)
 	}
@@ -281,7 +297,7 @@ func (o PruneImagesOptions) Run() error {
 		return err
 	}
 
-	allDSs, err := o.KubeClient.Extensions().DaemonSets(o.Namespace).List(metav1.ListOptions{})
+	allDSs, err := o.KubeClient.Apps().DaemonSets(o.Namespace).List(metav1.ListOptions{})
 	if err != nil {
 		// TODO: remove in future (3.9) release
 		if !kerrors.IsForbidden(err) {
@@ -290,7 +306,7 @@ func (o PruneImagesOptions) Run() error {
 		fmt.Fprintf(o.ErrOut, "Failed to list daemonsets: %v\n - * Make sure to update clusterRoleBindings.\n", err)
 	}
 
-	allDeployments, err := o.KubeClient.Extensions().Deployments(o.Namespace).List(metav1.ListOptions{})
+	allDeployments, err := o.KubeClient.Apps().Deployments(o.Namespace).List(metav1.ListOptions{})
 	if err != nil {
 		// TODO: remove in future (3.9) release
 		if !kerrors.IsForbidden(err) {
@@ -304,7 +320,7 @@ func (o PruneImagesOptions) Run() error {
 		return err
 	}
 
-	allRSs, err := o.KubeClient.Extensions().ReplicaSets(o.Namespace).List(metav1.ListOptions{})
+	allRSs, err := o.KubeClient.Apps().ReplicaSets(o.Namespace).List(metav1.ListOptions{})
 	if err != nil {
 		// TODO: remove in future (3.9) release
 		if !kerrors.IsForbidden(err) {
@@ -317,12 +333,12 @@ func (o PruneImagesOptions) Run() error {
 	if err != nil {
 		return err
 	}
-	limitRangesMap := make(map[string][]*kapi.LimitRange)
+	limitRangesMap := make(map[string][]*corev1.LimitRange)
 	for i := range limitRangesList.Items {
 		limit := limitRangesList.Items[i]
 		limits, ok := limitRangesMap[limit.Namespace]
 		if !ok {
-			limits = []*kapi.LimitRange{}
+			limits = []*corev1.LimitRange{}
 		}
 		limits = append(limits, &limit)
 		limitRangesMap[limit.Namespace] = limits
@@ -597,11 +613,11 @@ type describingImageStreamDeleter struct {
 
 var _ imageprune.ImageStreamDeleter = &describingImageStreamDeleter{}
 
-func (p *describingImageStreamDeleter) GetImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) {
+func (p *describingImageStreamDeleter) GetImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
 	return stream, nil
 }
 
-func (p *describingImageStreamDeleter) UpdateImageStream(stream *imageapi.ImageStream) (*imageapi.ImageStream, error) {
+func (p *describingImageStreamDeleter) UpdateImageStream(stream *imagev1.ImageStream) (*imagev1.ImageStream, error) {
 	if p.delegate == nil {
 		return stream, nil
 	}
@@ -614,7 +630,7 @@ func (p *describingImageStreamDeleter) UpdateImageStream(stream *imageapi.ImageS
 	return updatedStream, err
 }
 
-func (p *describingImageStreamDeleter) NotifyImageStreamPrune(stream *imageapi.ImageStream, updatedTags []string, deletedTags []string) {
+func (p *describingImageStreamDeleter) NotifyImageStreamPrune(stream *imagev1.ImageStream, updatedTags []string, deletedTags []string) {
 	if len(updatedTags) > 0 {
 		fmt.Fprintf(p.w, "Updating istags %s/%s: %s\n", stream.Namespace, stream.Name, strings.Join(updatedTags, ", "))
 	}
@@ -634,7 +650,7 @@ type describingImageDeleter struct {
 
 var _ imageprune.ImageDeleter = &describingImageDeleter{}
 
-func (p *describingImageDeleter) DeleteImage(image *imageapi.Image) error {
+func (p *describingImageDeleter) DeleteImage(image *imagev1.Image) error {
 	fmt.Fprintf(p.w, "Deleting image %s\n", image.Name)
 
 	if p.delegate == nil {
@@ -728,44 +744,14 @@ func (p *describingManifestDeleter) DeleteManifest(registryClient *http.Client,
 	return err
 }
 
-// getClients returns a OpenShift client and Kube client.
-func getClients(f kcmdutil.Factory) (appsclient.DeploymentConfigsGetter, buildclient.BuildInterface, imageclient.ImageInterface, kclientset.Interface, error) {
-	clientConfig, err := f.ToRESTConfig()
-	if err != nil {
-		return nil, nil, nil, nil, err
-	}
-
-	if len(clientConfig.BearerToken) == 0 {
-		return nil, nil, nil, nil, errNoToken
-	}
-
-	kubeClient, err := f.ClientSet()
-	if err != nil {
-		return nil, nil, nil, nil, err
-	}
-	appsClient, err := appsclient.NewForConfig(clientConfig)
-	if err != nil {
-		return nil, nil, nil, nil, err
-	}
-	buildClient, err := buildclient.NewForConfig(clientConfig)
-	if err != nil {
-		return nil, nil, nil, nil, err
-	}
-	imageClient, err := imageclient.NewForConfig(clientConfig)
-	if err != nil {
-		return nil, nil, nil, nil, err
-	}
-	return appsClient, buildClient, imageClient, kubeClient, err
-}
-
-func getImageClientFactory(f kcmdutil.Factory) func() (imageclient.ImageInterface, error) {
-	return func() (imageclient.ImageInterface, error) {
+func getImageClientFactory(f kcmdutil.Factory) func() (imagev1client.ImageV1Interface, error) {
+	return func() (imagev1client.ImageV1Interface, error) {
 		clientConfig, err := f.ToRESTConfig()
 		if err != nil {
 			return nil, err
 		}
 
-		return imageclient.NewForConfig(clientConfig)
+		return imagev1client.NewForConfig(clientConfig)
 	}
 }
 
diff --git a/pkg/oc/cli/admin/prune/images/images_test.go b/pkg/oc/cli/admin/prune/images/images_test.go
index 3a526c09e2f3..5c16ee06fc5e 100644
--- a/pkg/oc/cli/admin/prune/images/images_test.go
+++ b/pkg/oc/cli/admin/prune/images/images_test.go
@@ -13,37 +13,44 @@ import (
 	"testing"
 	"time"
 
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/apimachinery/pkg/util/sets"
 	apimachineryversion "k8s.io/apimachinery/pkg/version"
 	fakediscovery "k8s.io/client-go/discovery/fake"
-	"k8s.io/client-go/kubernetes/scheme"
+	fakekubernetes "k8s.io/client-go/kubernetes/fake"
+	kubernetesscheme "k8s.io/client-go/kubernetes/scheme"
 	restclient "k8s.io/client-go/rest"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
-
 	restfake "k8s.io/client-go/rest/fake"
-
-	appsclient "github.com/openshift/client-go/apps/clientset/versioned/fake"
-	buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset/fake"
-	imageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/fake"
+	"k8s.io/kubernetes/pkg/kubectl/scheme"
+
+	"github.com/openshift/api"
+	fakeappsclient "github.com/openshift/client-go/apps/clientset/versioned/fake"
+	fakeappsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/fake"
+	fakebuildclient "github.com/openshift/client-go/build/clientset/versioned/fake"
+	fakebuildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake"
+	fakeimageclient "github.com/openshift/client-go/image/clientset/versioned/fake"
+	fakeimagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake"
 	"github.com/openshift/origin/pkg/oc/cli/admin/prune/imageprune/testutil"
 	"github.com/openshift/origin/pkg/version"
+
+	// these are needed to make kapiref.GetReference work in the prune.go file
+	_ "github.com/openshift/origin/pkg/build/apis/build/install"
 )
 
 var logLevel = flag.Int("loglevel", 0, "")
 
 func TestImagePruneNamespaced(t *testing.T) {
 	flag.Lookup("v").Value.Set(fmt.Sprint(*logLevel))
-	kFake := fake.NewSimpleClientset()
-	imageFake := imageclient.NewSimpleClientset()
+	kFake := fakekubernetes.NewSimpleClientset()
+	imageFake := &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset().Fake)}
 	opts := &PruneImagesOptions{
 		Namespace: "foo",
 
-		AppsClient:  appsclient.NewSimpleClientset().Apps(),
-		BuildClient: buildclient.NewSimpleClientset().Build(),
-		ImageClient: imageFake.Image(),
+		AppsClient:  &fakeappsv1client.FakeAppsV1{Fake: &(fakeappsclient.NewSimpleClientset().Fake)},
+		BuildClient: &fakebuildv1client.FakeBuildV1{Fake: &(fakebuildclient.NewSimpleClientset().Fake)},
+		ImageClient: imageFake,
 		KubeClient:  kFake,
 		Out:         ioutil.Discard,
 		ErrOut:      os.Stderr,
@@ -74,17 +81,20 @@ func TestImagePruneNamespaced(t *testing.T) {
 
 func TestImagePruneErrOnBadReference(t *testing.T) {
 	flag.Lookup("v").Value.Set(fmt.Sprint(*logLevel))
-	podBad := testutil.Pod("foo", "pod1", kapi.PodRunning, "invalid image reference")
-	podGood := testutil.Pod("foo", "pod2", kapi.PodRunning, "example.com/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")
+	podBad := testutil.Pod("foo", "pod1", corev1.PodRunning, "invalid image reference")
+	podGood := testutil.Pod("foo", "pod2", corev1.PodRunning, "example.com/foo/bar@sha256:0000000000000000000000000000000000000000000000000000000000000000")
 	dep := testutil.Deployment("foo", "dep1", "do not blame me")
 	bcBad := testutil.BC("foo", "bc1", "source", "ImageStreamImage", "foo", "bar:invalid-digest")
 
-	kFake := fake.NewSimpleClientset(&podBad, &podGood, &dep)
-	imageFake := imageclient.NewSimpleClientset()
+	kFake := fakekubernetes.NewSimpleClientset(&podBad, &podGood, &dep)
+	imageFake := &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset().Fake)}
 	fakeDiscovery := &fakeVersionDiscovery{
 		masterVersion: version.Get(),
 	}
 
+	// install the OpenShift API types into kubectl's scheme so GetReference can resolve OpenShift objects
+	api.Install(scheme.Scheme)
+
 	switch d := kFake.Discovery().(type) {
 	case *fakediscovery.FakeDiscovery:
 		fakeDiscovery.FakeDiscovery = d
@@ -94,9 +104,9 @@ func TestImagePruneErrOnBadReference(t *testing.T) {
 
 	errBuf := bytes.NewBuffer(make([]byte, 0, 4096))
 	opts := &PruneImagesOptions{
-		AppsClient:      appsclient.NewSimpleClientset().Apps(),
-		BuildClient:     buildclient.NewSimpleClientset(&bcBad).Build(),
-		ImageClient:     imageFake.Image(),
+		AppsClient:      &fakeappsv1client.FakeAppsV1{Fake: &(fakeappsclient.NewSimpleClientset().Fake)},
+		BuildClient:     &fakebuildv1client.FakeBuildV1{Fake: &(fakebuildclient.NewSimpleClientset(&bcBad).Fake)},
+		ImageClient:     imageFake,
 		KubeClient:      kFake,
 		DiscoveryClient: fakeDiscovery,
 		Timeout:         time.Second,
@@ -162,7 +172,7 @@ type fakeVersionDiscovery struct {
 
 func (f *fakeVersionDiscovery) RESTClient() restclient.Interface {
 	return &restfake.RESTClient{
-		NegotiatedSerializer: scheme.Codecs,
+		NegotiatedSerializer: kubernetesscheme.Codecs,
 		Client: restfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
 			if req.URL.Path != "/version/openshift" {
 				return &http.Response{
diff --git a/pkg/oc/cli/admin/top/graph.go b/pkg/oc/cli/admin/top/graph.go
index ce47f86c41f9..6789ac6677c3 100644
--- a/pkg/oc/cli/admin/top/graph.go
+++ b/pkg/oc/cli/admin/top/graph.go
@@ -4,8 +4,9 @@ import (
 	"github.com/golang/glog"
 	gonum "github.com/gonum/graph"
 
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 
+	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	"github.com/openshift/origin/pkg/image/dockerlayer"
 	"github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
@@ -32,7 +33,7 @@ func getImageNodes(nodes []gonum.Node) []*imagegraph.ImageNode {
 	return ret
 }
 
-func addImagesToGraph(g genericgraph.Graph, images *imageapi.ImageList) {
+func addImagesToGraph(g genericgraph.Graph, images *imagev1.ImageList) {
 	for i := range images.Items {
 		image := &images.Items[i]
 
@@ -59,7 +60,7 @@ func addImagesToGraph(g genericgraph.Graph, images *imageapi.ImageList) {
 	}
 }
 
-func addImageStreamsToGraph(g genericgraph.Graph, streams *imageapi.ImageStreamList) {
+func addImageStreamsToGraph(g genericgraph.Graph, streams *imagev1.ImageStreamList) {
 	for i := range streams.Items {
 		stream := &streams.Items[i]
 		glog.V(4).Infof("Adding ImageStream %s/%s to graph", stream.Namespace, stream.Name)
@@ -87,10 +88,10 @@ func addImageStreamsToGraph(g genericgraph.Graph, streams *imageapi.ImageStreamL
 	}
 }
 
-func addPodsToGraph(g genericgraph.Graph, pods *kapi.PodList) {
+func addPodsToGraph(g genericgraph.Graph, pods *corev1.PodList) {
 	for i := range pods.Items {
 		pod := &pods.Items[i]
-		if pod.Status.Phase != kapi.PodRunning && pod.Status.Phase != kapi.PodPending {
+		if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodPending {
 			glog.V(4).Infof("Pod %s/%s is not running nor pending - skipping", pod.Namespace, pod.Name)
 			continue
 		}
@@ -101,7 +102,7 @@ func addPodsToGraph(g genericgraph.Graph, pods *kapi.PodList) {
 	}
 }
 
-func addPodSpecToGraph(g genericgraph.Graph, spec *kapi.PodSpec, predecessor gonum.Node) {
+func addPodSpecToGraph(g genericgraph.Graph, spec *corev1.PodSpec, predecessor gonum.Node) {
 	for j := range spec.Containers {
 		container := spec.Containers[j]
 
diff --git a/pkg/oc/cli/admin/top/images.go b/pkg/oc/cli/admin/top/images.go
index b8d796a1655a..8a71f236d48a 100644
--- a/pkg/oc/cli/admin/top/images.go
+++ b/pkg/oc/cli/admin/top/images.go
@@ -9,18 +9,21 @@ import (
 	"github.com/docker/go-units"
 	"github.com/spf13/cobra"
 
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	kinternalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	dockerv10 "github.com/openshift/api/image/docker10"
+	imagev1 "github.com/openshift/api/image/v1"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
+	appsutil "github.com/openshift/origin/pkg/apps/util"
 	buildapi "github.com/openshift/origin/pkg/build/apis/build"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	imageclientinternal "github.com/openshift/origin/pkg/image/generated/internalclientset"
+	imageutil "github.com/openshift/origin/pkg/image/util"
 	"github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
@@ -45,9 +48,9 @@ var (
 
 type TopImagesOptions struct {
 	// internal values
-	Images  *imageapi.ImageList
-	Streams *imageapi.ImageStreamList
-	Pods    *kapi.PodList
+	Images  *imagev1.ImageList
+	Streams *imagev1.ImageStreamList
+	Pods    *corev1.PodList
 
 	genericclioptions.IOStreams
 }
@@ -83,11 +86,11 @@ func (o *TopImagesOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args
 	if err != nil {
 		return err
 	}
-	kClient, err := kinternalclientset.NewForConfig(clientConfig)
+	kClient, err := corev1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	imageClient, err := imageclientinternal.NewForConfig(clientConfig)
+	imageClient, err := imagev1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
@@ -97,19 +100,19 @@ func (o *TopImagesOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args
 		namespace = metav1.NamespaceAll
 	}
 
-	allImages, err := imageClient.Image().Images().List(metav1.ListOptions{})
+	allImages, err := imageClient.Images().List(metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
 	o.Images = allImages
 
-	allStreams, err := imageClient.Image().ImageStreams(namespace).List(metav1.ListOptions{})
+	allStreams, err := imageClient.ImageStreams(namespace).List(metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
 	o.Streams = allStreams
 
-	allPods, err := kClient.Core().Pods(namespace).List(metav1.ListOptions{})
+	allPods, err := kClient.Pods(namespace).List(metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
@@ -202,7 +205,7 @@ func (o TopImagesOptions) imagesTop() []Info {
 	return infos
 }
 
-func getStorage(image *imageapi.Image) int64 {
+func getStorage(image *imagev1.Image) int64 {
 	storage := int64(0)
 	blobSet := sets.NewString()
 	for _, layer := range image.DockerImageLayers {
@@ -212,8 +215,15 @@ func getStorage(image *imageapi.Image) int64 {
 		blobSet.Insert(layer.Name)
 		storage += layer.LayerSize
 	}
-	if len(image.DockerImageConfig) > 0 && !blobSet.Has(image.DockerImageMetadata.ID) {
-		blobSet.Insert(image.DockerImageMetadata.ID)
+	if err := imageutil.ImageWithMetadata(image); err != nil {
+		return storage
+	}
+	dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
+	if !ok {
+		return storage
+	}
+	if len(image.DockerImageConfig) > 0 && !blobSet.Has(dockerImage.ID) {
+		blobSet.Insert(dockerImage.ID)
 		storage += int64(len(image.DockerImageConfig))
 	}
 	return storage
@@ -233,11 +243,11 @@ func getImageStreamTags(g genericgraph.Graph, node *imagegraph.ImageNode) []stri
 	return istags
 }
 
-func getTags(stream *imageapi.ImageStream, image *imageapi.Image) []string {
+func getTags(stream *imagev1.ImageStream, image *imagev1.Image) []string {
 	tags := []string{}
-	for tag, history := range stream.Status.Tags {
-		if len(history.Items) > 0 && history.Items[0].Image == image.Name {
-			tags = append(tags, tag)
+	for _, tag := range stream.Status.Tags {
+		if len(tag.Items) > 0 && tag.Items[0].Image == image.Name {
+			tags = append(tags, tag.Tag)
 		}
 	}
 	imageapi.PrioritizeTags(tags)
@@ -268,7 +278,7 @@ func getImageUsage(g genericgraph.Graph, node *imagegraph.ImageNode) []string {
 	return usage
 }
 
-func getController(pod *kapi.Pod) string {
+func getController(pod *corev1.Pod) string {
 	controller := "<unknown>"
 	if pod.Annotations == nil {
 		return controller
@@ -277,10 +287,10 @@ func getController(pod *kapi.Pod) string {
 	if bc, ok := pod.Annotations[buildapi.BuildAnnotation]; ok {
 		return fmt.Sprintf("Build: %s/%s", pod.Namespace, bc)
 	}
-	if dc, ok := pod.Annotations[appsapi.DeploymentAnnotation]; ok {
+	if dc, ok := pod.Annotations[appsutil.DeploymentAnnotation]; ok {
 		return fmt.Sprintf("Deployment: %s/%s", pod.Namespace, dc)
 	}
-	if dc, ok := pod.Annotations[appsapi.DeploymentPodAnnotation]; ok {
+	if dc, ok := pod.Annotations[appsutil.DeploymentPodAnnotation]; ok {
 		return fmt.Sprintf("Deployer: %s/%s", pod.Namespace, dc)
 	}
 
diff --git a/pkg/oc/cli/admin/top/images_test.go b/pkg/oc/cli/admin/top/images_test.go
index a0fbadae709a..aea5b178f5e5 100644
--- a/pkg/oc/cli/admin/top/images_test.go
+++ b/pkg/oc/cli/admin/top/images_test.go
@@ -3,43 +3,45 @@ package top
 import (
 	"testing"
 
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/apimachinery/pkg/runtime"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	imagev1 "github.com/openshift/api/image/v1"
+	appsutil "github.com/openshift/origin/pkg/apps/util"
 	buildapi "github.com/openshift/origin/pkg/build/apis/build"
-	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	"github.com/openshift/origin/pkg/image/dockerlayer"
 )
 
 func TestImagesTop(t *testing.T) {
 	testCases := map[string]struct {
-		images   *imageapi.ImageList
-		streams  *imageapi.ImageStreamList
-		pods     *kapi.PodList
+		images   *imagev1.ImageList
+		streams  *imagev1.ImageStreamList
+		pods     *corev1.PodList
 		expected []Info
 	}{
 		"no metadata": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{},
+			pods: &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -51,11 +53,11 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"with metadata": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1", LayerSize: int64(512)},
 							{Name: "layer2", LayerSize: int64(512)},
 						},
@@ -63,21 +65,22 @@ func TestImagesTop(t *testing.T) {
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{},
+			pods: &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -90,24 +93,24 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"with metadata and image config": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1", LayerSize: int64(512)},
 							{Name: "layer2", LayerSize: int64(512)},
 						},
-						DockerImageManifest: "non empty metadata",
+						DockerImageManifest: `{"schemaVersion": 1, "history": [{"v1Compatibility": "{\"id\":\"2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7\",\"parent\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"created\":\"2015-02-21T02:11:06.735146646Z\",\"container\":\"c9a3eda5951d28aa8dbe5933be94c523790721e4f80886d0a8e7a710132a38ec\",\"container_config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/bin/bash]\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"43bd710ec89a\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"117ee323aaa9d1b136ea55e4421f4ce413dfc6c0cc6b2186dea6c88d93e1ad7c\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[]},\"architecture\":\"amd64\",\"os\":\"linux\",\"checksum\":\"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\"Size\":0}\n"}]}`,
 						DockerImageConfig:   "raw image config",
-						DockerImageMetadata: imageapi.DockerImage{
-							ID: "manifestConfigID",
+						DockerImageMetadata: runtime.RawExtension{
+							Raw: []byte(`{"Id":"manifestConfigID"}`),
 						},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{},
-			pods:    &kapi.PodList{},
+			streams: &imagev1.ImageStreamList{},
+			pods:    &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:    "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -119,25 +122,22 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"with metadata and image config and some layers duplicated": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1", LayerSize: int64(512)},
 							{Name: "layer2", LayerSize: int64(256)},
 							{Name: "layer1", LayerSize: int64(512)},
 						},
 						DockerImageManifest: "non empty metadata",
 						DockerImageConfig:   "raw image config",
-						DockerImageMetadata: imageapi.DockerImage{
-							ID: "layer2",
-						},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{},
-			pods:    &kapi.PodList{},
+			streams: &imagev1.ImageStreamList{},
+			pods:    &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:    "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -149,32 +149,34 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"multiple tags": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:        metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imageapi.ImageLayer{{Name: "layer1"}, {Name: "layer2"}},
+						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1"}, {Name: "layer2"}},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
-								"tag2": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+								{
+									Tag:   "tag2",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{},
+			pods: &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -186,42 +188,45 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"multiple streams": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:        metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers: []imageapi.ImageLayer{{Name: "layer1"}, {Name: "layer2"}},
+						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1"}, {Name: "layer2"}},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
-								"tag2": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+								{
+									Tag:   "tag2",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream2", Namespace: "ns2"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{},
+			pods: &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -233,13 +238,13 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"image without a stream": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{},
-			pods:    &kapi.PodList{},
+			streams: &imagev1.ImageStreamList{},
+			pods:    &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -251,16 +256,16 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"image parents": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:          metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers:   []imageapi.ImageLayer{{Name: "layer1"}},
+						DockerImageLayers:   []imagev1.ImageLayer{{Name: "layer1"}},
 						DockerImageManifest: "non empty metadata",
 					},
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1"},
 							{Name: "layer2"},
 						},
@@ -268,8 +273,8 @@ func TestImagesTop(t *testing.T) {
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{},
-			pods:    &kapi.PodList{},
+			streams: &imagev1.ImageStreamList{},
+			pods:    &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -288,16 +293,16 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"image parents with empty layer": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:          metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers:   []imageapi.ImageLayer{{Name: "layer1"}},
+						DockerImageLayers:   []imagev1.ImageLayer{{Name: "layer1"}},
 						DockerImageManifest: "non empty metadata",
 					},
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1"},
 							{Name: dockerlayer.DigestSha256EmptyTar},
 							{Name: "layer2"},
@@ -306,8 +311,8 @@ func TestImagesTop(t *testing.T) {
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{},
-			pods:    &kapi.PodList{},
+			streams: &imagev1.ImageStreamList{},
+			pods:    &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -326,16 +331,16 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"image parents with gzipped empty layer": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:          metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"},
-						DockerImageLayers:   []imageapi.ImageLayer{{Name: "layer1"}},
+						DockerImageLayers:   []imagev1.ImageLayer{{Name: "layer1"}},
 						DockerImageManifest: "non empty metadata",
 					},
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1"},
 							{Name: dockerlayer.GzippedEmptyLayerDigest},
 							{Name: "layer2"},
@@ -344,8 +349,8 @@ func TestImagesTop(t *testing.T) {
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{},
-			pods:    &kapi.PodList{},
+			streams: &imagev1.ImageStreamList{},
+			pods:    &corev1.PodList{},
 			expected: []Info{
 				imageInfo{
 					Image:           "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a",
@@ -364,31 +369,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"build pending": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
 						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{buildapi.BuildAnnotation: "build1"}},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodPending},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodPending},
 					},
 				},
 			},
@@ -403,31 +409,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"build running": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
 						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{buildapi.BuildAnnotation: "build1"}},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodRunning},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
 					},
 				},
 			},
@@ -442,31 +449,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"deployer pending": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsapi.DeploymentPodAnnotation: "deployer1"}},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodPending},
+						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsutil.DeploymentPodAnnotation: "deployer1"}},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodPending},
 					},
 				},
 			},
@@ -481,31 +489,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"deployer running": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsapi.DeploymentPodAnnotation: "deployer1"}},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodRunning},
+						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsutil.DeploymentPodAnnotation: "deployer1"}},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
 					},
 				},
 			},
@@ -520,31 +529,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"deployement pending": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsapi.DeploymentAnnotation: "deplyment1"}},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodPending},
+						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsutil.DeploymentAnnotation: "deplyment1"}},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodPending},
 					},
 				},
 			},
@@ -559,31 +569,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"deployment running": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
-						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsapi.DeploymentAnnotation: "deplyment1"}},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodRunning},
+						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{appsutil.DeploymentAnnotation: "deplyment1"}},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
 					},
 				},
 			},
@@ -598,31 +609,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"unknown controller 1": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
 						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodRunning},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
 					},
 				},
 			},
@@ -637,31 +649,32 @@ func TestImagesTop(t *testing.T) {
 			},
 		},
 		"unknown controller 2": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{ObjectMeta: metav1.ObjectMeta{Name: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}},
 								},
 							},
 						},
 					},
 				},
 			},
-			pods: &kapi.PodList{
-				Items: []kapi.Pod{
+			pods: &corev1.PodList{
+				Items: []corev1.Pod{
 					{
 						ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Annotations: map[string]string{"unknown controller": "unknown"}},
-						Spec:       kapi.PodSpec{Containers: []kapi.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
-						Status:     kapi.PodStatus{Phase: kapi.PodRunning},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{{Image: "image@sha256:08151bf2fc92355f236918bb16905921e6f66e1d03100fb9b18d60125db3df3a"}}},
+						Status:     corev1.PodStatus{Phase: corev1.PodRunning},
 					},
 				},
 			},
diff --git a/pkg/oc/cli/admin/top/imagestreams.go b/pkg/oc/cli/admin/top/imagestreams.go
index 3d99c062bc90..d3c742f938f6 100644
--- a/pkg/oc/cli/admin/top/imagestreams.go
+++ b/pkg/oc/cli/admin/top/imagestreams.go
@@ -15,10 +15,11 @@ import (
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
 
+	dockerv10 "github.com/openshift/api/image/docker10"
+	imagev1 "github.com/openshift/api/image/v1"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
+	imageutil "github.com/openshift/origin/pkg/image/util"
 	"github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-
-	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	imageclientinternal "github.com/openshift/origin/pkg/image/generated/internalclientset"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
 )
 
@@ -38,8 +39,8 @@ var (
 
 type TopImageStreamsOptions struct {
 	// internal values
-	Images  *imageapi.ImageList
-	Streams *imageapi.ImageStreamList
+	Images  *imagev1.ImageList
+	Streams *imagev1.ImageStreamList
 
 	genericclioptions.IOStreams
 }
@@ -79,18 +80,18 @@ func (o *TopImageStreamsOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command
 	if err != nil {
 		return err
 	}
-	imageClient, err := imageclientinternal.NewForConfig(clientConfig)
+	imageClient, err := imagev1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
 
-	allImages, err := imageClient.Image().Images().List(metav1.ListOptions{})
+	allImages, err := imageClient.Images().List(metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
 	o.Images = allImages
 
-	allStreams, err := imageClient.Image().ImageStreams(namespace).List(metav1.ListOptions{})
+	allStreams, err := imageClient.ImageStreams(namespace).List(metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
@@ -183,8 +184,15 @@ func getImageStreamSize(g genericgraph.Graph, node *imagegraph.ImageStreamNode)
 			blobSet.Insert(layer.Name)
 			storage += layer.LayerSize
 		}
-		if len(image.DockerImageConfig) > 0 && !blobSet.Has(image.DockerImageMetadata.ID) {
-			blobSet.Insert(image.DockerImageMetadata.ID)
+		if err := imageutil.ImageWithMetadata(image); err != nil {
+			continue
+		}
+		dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
+		if !ok {
+			continue
+		}
+		if len(image.DockerImageConfig) > 0 && !blobSet.Has(dockerImage.ID) {
+			blobSet.Insert(dockerImage.ID)
 			storage += int64(len(image.DockerImageConfig))
 		}
 	}
diff --git a/pkg/oc/cli/admin/top/imagestreams_test.go b/pkg/oc/cli/admin/top/imagestreams_test.go
index cbef959ab3fe..c8f6b7ab8897 100644
--- a/pkg/oc/cli/admin/top/imagestreams_test.go
+++ b/pkg/oc/cli/admin/top/imagestreams_test.go
@@ -4,27 +4,30 @@ import (
 	"testing"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	kapihelper "k8s.io/kubernetes/pkg/apis/core/helper"
 
-	imageapi "github.com/openshift/origin/pkg/image/apis/image"
+	dockerv10 "github.com/openshift/api/image/docker10"
+	imagev1 "github.com/openshift/api/image/v1"
 )
 
 func TestImageStreamsTop(t *testing.T) {
 	testCases := map[string]struct {
-		images   *imageapi.ImageList
-		streams  *imageapi.ImageStreamList
+		images   *imagev1.ImageList
+		streams  *imagev1.ImageStreamList
 		expected []Info
 	}{
 		"empty image stream": {
-			images: &imageapi.ImageList{},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			images: &imagev1.ImageList{},
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "image1"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "image1"}},
 								},
 							},
 						},
@@ -40,22 +43,23 @@ func TestImageStreamsTop(t *testing.T) {
 			},
 		},
 		"no storage": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imageapi.ImageLayer{{Name: "layer1"}},
+						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1"}},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "image1"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "image1"}},
 								},
 							},
 						},
@@ -71,22 +75,23 @@ func TestImageStreamsTop(t *testing.T) {
 			},
 		},
 		"with storage": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imageapi.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
+						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "image1"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "image1"}},
 								},
 							},
 						},
@@ -103,25 +108,26 @@ func TestImageStreamsTop(t *testing.T) {
 			},
 		},
 		"multiple layers": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1", LayerSize: 1024},
 							{Name: "layer2", LayerSize: 512},
 						},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "image1"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "image1"}},
 								},
 							},
 						},
@@ -138,32 +144,34 @@ func TestImageStreamsTop(t *testing.T) {
 			},
 		},
 		"multiple images": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imageapi.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
+						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
 					},
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1", LayerSize: int64(1024)},
 							{Name: "layer2", LayerSize: int64(128)},
 						},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "image1"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "image1"}},
 								},
-								"tag2": {
-									Items: []imageapi.TagEvent{{Image: "image2"}},
+								{
+									Tag:   "tag2",
+									Items: []imagev1.TagEvent{{Image: "image2"}},
 								},
 							},
 						},
@@ -180,40 +188,46 @@ func TestImageStreamsTop(t *testing.T) {
 			},
 		},
 		"multiple images with manifest config": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imageapi.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
+						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
 						DockerImageConfig: "raw image config",
-						DockerImageMetadata: imageapi.DockerImage{
-							ID: "manifestConfigID",
+						DockerImageMetadata: runtime.RawExtension{
+							Object: &dockerv10.DockerImage{
+								ID: "manifestConfigID",
+							},
 						},
 					},
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1", LayerSize: int64(1024)},
 							{Name: "layer2", LayerSize: int64(128)},
 						},
 						DockerImageConfig: "raw image config",
-						DockerImageMetadata: imageapi.DockerImage{
-							ID: "manifestConfigID",
+						DockerImageMetadata: runtime.RawExtension{
+							Object: &dockerv10.DockerImage{
+								ID: "manifestConfigID",
+							},
 						},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "image1"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "image1"}},
 								},
-								"tag2": {
-									Items: []imageapi.TagEvent{{Image: "image2"}},
+								{
+									Tag:   "tag2",
+									Items: []imagev1.TagEvent{{Image: "image2"}},
 								},
 							},
 						},
@@ -230,29 +244,30 @@ func TestImageStreamsTop(t *testing.T) {
 			},
 		},
 		"multiple unreferenced images": {
-			images: &imageapi.ImageList{
-				Items: []imageapi.Image{
+			images: &imagev1.ImageList{
+				Items: []imagev1.Image{
 					{
 						ObjectMeta:        metav1.ObjectMeta{Name: "image1"},
-						DockerImageLayers: []imageapi.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
+						DockerImageLayers: []imagev1.ImageLayer{{Name: "layer1", LayerSize: int64(1024)}},
 					},
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "image2"},
-						DockerImageLayers: []imageapi.ImageLayer{
+						DockerImageLayers: []imagev1.ImageLayer{
 							{Name: "layer1", LayerSize: int64(1024)},
 							{Name: "layer2", LayerSize: int64(128)},
 						},
 					},
 				},
 			},
-			streams: &imageapi.ImageStreamList{
-				Items: []imageapi.ImageStream{
+			streams: &imagev1.ImageStreamList{
+				Items: []imagev1.ImageStream{
 					{
 						ObjectMeta: metav1.ObjectMeta{Name: "stream1", Namespace: "ns1"},
-						Status: imageapi.ImageStreamStatus{
-							Tags: map[string]imageapi.TagEventList{
-								"tag1": {
-									Items: []imageapi.TagEvent{{Image: "image1"}},
+						Status: imagev1.ImageStreamStatus{
+							Tags: []imagev1.NamedTagEventList{
+								{
+									Tag:   "tag1",
+									Items: []imagev1.TagEvent{{Image: "image1"}},
 								},
 							},
 						},
diff --git a/pkg/oc/cli/cancelbuild/cancelbuild.go b/pkg/oc/cli/cancelbuild/cancelbuild.go
index 0de22531bca6..cfec6cb36bf4 100644
--- a/pkg/oc/cli/cancelbuild/cancelbuild.go
+++ b/pkg/oc/cli/cancelbuild/cancelbuild.go
@@ -22,9 +22,6 @@ import (
 	buildclientset "github.com/openshift/client-go/build/clientset/versioned"
 	buildtv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
 	buildlisterv1 "github.com/openshift/client-go/build/listers/build/v1"
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
-	buildinternalhelpers "github.com/openshift/origin/pkg/build/apis/build/internal_helpers"
-	buildapiv1 "github.com/openshift/origin/pkg/build/apis/build/v1"
 	buildclientinternal "github.com/openshift/origin/pkg/build/client"
 	buildclientv1 "github.com/openshift/origin/pkg/build/client/v1"
 	buildutil "github.com/openshift/origin/pkg/build/util"
@@ -213,12 +210,7 @@ func (o *CancelBuildOptions) RunCancelBuild() error {
 			}
 		}
 
-		internalBuildStatus := &buildapi.BuildStatus{}
-		if err := buildapiv1.Convert_v1_BuildStatus_To_build_BuildStatus(&build.Status, internalBuildStatus, nil); err != nil {
-			return err
-		}
-
-		if stateMatch && !buildinternalhelpers.IsTerminalPhase(internalBuildStatus.Phase) {
+		if stateMatch && !buildutil.IsTerminalPhase(build.Status.Phase) {
 			builds = append(builds, build)
 		}
 	}
diff --git a/pkg/oc/cli/debug/debug.go b/pkg/oc/cli/debug/debug.go
index 4fe8a4b83c25..91fc69286aff 100644
--- a/pkg/oc/cli/debug/debug.go
+++ b/pkg/oc/cli/debug/debug.go
@@ -39,10 +39,10 @@ import (
 	"k8s.io/kubernetes/pkg/util/interrupt"
 
 	appsv1 "github.com/openshift/api/apps/v1"
+	dockerv10 "github.com/openshift/api/image/docker10"
 	imagev1 "github.com/openshift/api/image/v1"
 	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
 	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
 	appsutil "github.com/openshift/origin/pkg/apps/util"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	imageutil "github.com/openshift/origin/pkg/image/util"
@@ -486,7 +486,7 @@ func (o *DebugOptions) getContainerImageViaDeploymentConfig(pod *corev1.Pod, con
 		return nil, nil // ID is needed for later lookup
 	}
 
-	dcname := pod.Annotations[appsapi.DeploymentConfigAnnotation]
+	dcname := pod.Annotations[appsutil.DeploymentConfigAnnotation]
 	if dcname == "" {
 		return nil, nil // Pod doesn't appear to have been created by a DeploymentConfig
 	}
@@ -573,8 +573,11 @@ func (o *DebugOptions) getContainerImageCommand(pod *corev1.Pod, container *core
 		return nil, fmt.Errorf("error: no usable image found")
 	}
 
-	dockerImage, err := imageutil.GetImageMetadata(image)
-	if err != nil {
+	if err := imageutil.ImageWithMetadata(image); err != nil {
+		return nil, err
+	}
+	dockerImage, ok := image.DockerImageMetadata.Object.(*dockerv10.DockerImage)
+	if !ok {
-		return nil, err
+		return nil, fmt.Errorf("unable to parse metadata for image %s", image.Name)
 	}
 
diff --git a/pkg/oc/cli/rollout/retry.go b/pkg/oc/cli/rollout/retry.go
index ae43527841cc..ed8e0d658005 100644
--- a/pkg/oc/cli/rollout/retry.go
+++ b/pkg/oc/cli/rollout/retry.go
@@ -214,7 +214,7 @@ func (o RetryOptions) Run() error {
 		patches := set.CalculatePatchesExternal([]*resource.Info{{Object: rc, Mapping: mapping}}, func(info *resource.Info) (bool, error) {
 			rc.Annotations[appsapi.DeploymentStatusAnnotation] = string(appsapi.DeploymentStatusNew)
 			delete(rc.Annotations, appsapi.DeploymentStatusReasonAnnotation)
-			delete(rc.Annotations, appsapi.DeploymentCancelledAnnotation)
+			delete(rc.Annotations, appsutil.DeploymentCancelledAnnotation)
 			return true, nil
 		})
 
diff --git a/pkg/oc/cli/status/status.go b/pkg/oc/cli/status/status.go
index 111e9eac860c..7f8d1f9fe24a 100644
--- a/pkg/oc/cli/status/status.go
+++ b/pkg/oc/cli/status/status.go
@@ -8,18 +8,18 @@ import (
 	"github.com/spf13/cobra"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
 
-	appsclient "github.com/openshift/client-go/apps/clientset/versioned"
-	buildclientinternal "github.com/openshift/origin/pkg/build/generated/internalclientset"
-	imageclientinternal "github.com/openshift/origin/pkg/image/generated/internalclientset"
+	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
+	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
+	projectv1client "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1"
+	routev1client "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1"
 	"github.com/openshift/origin/pkg/oc/lib/describe"
 	loginutil "github.com/openshift/origin/pkg/oc/util/project"
-	projectclientinternal "github.com/openshift/origin/pkg/project/generated/internalclientset"
-	routeclientinternal "github.com/openshift/origin/pkg/route/generated/internalclientset"
 	dotutil "github.com/openshift/origin/pkg/util/dot"
 )
 
@@ -114,27 +114,27 @@ func (o *StatusOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, baseCLI
 	if err != nil {
 		return err
 	}
-	kclientset, err := kclientset.NewForConfig(clientConfig)
+	kclientset, err := kubernetes.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	projectClient, err := projectclientinternal.NewForConfig(clientConfig)
+	projectClient, err := projectv1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	buildClient, err := buildclientinternal.NewForConfig(clientConfig)
+	buildClient, err := buildv1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	imageClient, err := imageclientinternal.NewForConfig(clientConfig)
+	imageClient, err := imagev1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	appsClient, err := appsclient.NewForConfig(clientConfig)
+	appsClient, err := appsv1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
-	routeClient, err := routeclientinternal.NewForConfig(clientConfig)
+	routeClient, err := routev1client.NewForConfig(clientConfig)
 	if err != nil {
 		return err
 	}
@@ -173,11 +173,11 @@ func (o *StatusOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, baseCLI
 	o.describer = &describe.ProjectStatusDescriber{
 		KubeClient:    kclientset,
 		RESTMapper:    restMapper,
-		ProjectClient: projectClient.Project(),
-		BuildClient:   buildClient.Build(),
-		ImageClient:   imageClient.Image(),
-		AppsClient:    appsClient.Apps(),
-		RouteClient:   routeClient.Route(),
+		ProjectClient: projectClient,
+		BuildClient:   buildClient,
+		ImageClient:   imageClient,
+		AppsClient:    appsClient,
+		RouteClient:   routeClient,
 		Suggest:       o.suggest,
 		Server:        clientConfig.Host,
 
diff --git a/pkg/oc/lib/describe/chaindescriber.go b/pkg/oc/lib/describe/chaindescriber.go
index 938b806bffb4..3dd1824af613 100644
--- a/pkg/oc/lib/describe/chaindescriber.go
+++ b/pkg/oc/lib/describe/chaindescriber.go
@@ -13,8 +13,8 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 
-	buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset/typed/build/internalversion"
-	imageapi "github.com/openshift/origin/pkg/image/apis/image"
+	imagev1 "github.com/openshift/api/image/v1"
+	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
 	buildedges "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph"
 	buildanalysis "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/analysis"
 	buildgraph "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/nodes"
@@ -38,14 +38,14 @@ func (e NotFoundErr) Error() string {
 // ChainDescriber generates extended information about a chain of
 // dependencies of an image stream
 type ChainDescriber struct {
-	c            buildclient.BuildConfigsGetter
+	c            buildv1client.BuildConfigsGetter
 	namespaces   sets.String
 	outputFormat string
 	namer        osgraph.Namer
 }
 
 // NewChainDescriber returns a new ChainDescriber
-func NewChainDescriber(c buildclient.BuildConfigsGetter, namespaces sets.String, out string) *ChainDescriber {
+func NewChainDescriber(c buildv1client.BuildConfigsGetter, namespaces sets.String, out string) *ChainDescriber {
 	return &ChainDescriber{c: c, namespaces: namespaces, outputFormat: out, namer: namespacedFormatter{hideNamespace: true}}
 }
 
@@ -81,7 +81,7 @@ func (d *ChainDescriber) MakeGraph() (osgraph.Graph, error) {
 // image stream tag (name:tag) in namespace. Namespace is needed here
 // because image stream tags with the same name can be found across
 // different namespaces.
-func (d *ChainDescriber) Describe(ist *imageapi.ImageStreamTag, includeInputImages, reverse bool) (string, error) {
+func (d *ChainDescriber) Describe(ist *imagev1.ImageStreamTag, includeInputImages, reverse bool) (string, error) {
 	g, err := d.MakeGraph()
 	if err != nil {
 		return "", err
diff --git a/pkg/oc/lib/describe/chaindescriber_test.go b/pkg/oc/lib/describe/chaindescriber_test.go
index d18250be488b..6ac704619c2f 100644
--- a/pkg/oc/lib/describe/chaindescriber_test.go
+++ b/pkg/oc/lib/describe/chaindescriber_test.go
@@ -10,9 +10,9 @@ import (
 
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
 
-	buildfakeclient "github.com/openshift/origin/pkg/build/generated/internalclientset/fake"
+	fakebuildclient "github.com/openshift/client-go/build/clientset/versioned/fake"
+	fakebuildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake"
 	buildclientscheme "github.com/openshift/origin/pkg/build/generated/internalclientset/scheme"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
 )
@@ -203,16 +203,16 @@ func TestChainDescriber(t *testing.T) {
 			objs := []runtime.Object{}
 			if len(test.path) > 0 {
 				var err error
-				objs, err = readObjectsFromPath(test.path, test.defaultNamespace, legacyscheme.Codecs.UniversalDecoder(), legacyscheme.Scheme)
+				objs, err = readObjectsFromPath(test.path, test.defaultNamespace)
 				if err != nil {
 					t.Fatal(err)
 				}
 			}
 			ist := imagegraph.MakeImageStreamTagObjectMeta(test.defaultNamespace, test.name, test.tag)
 
-			fakeClient := buildfakeclient.NewSimpleClientset(filterByScheme(buildclientscheme.Scheme, objs...)...)
+			fakeClient := &fakebuildv1client.FakeBuildV1{Fake: &(fakebuildclient.NewSimpleClientset(filterByScheme(buildclientscheme.Scheme, objs...)...).Fake)}
 
-			desc, err := NewChainDescriber(fakeClient.Build(), test.namespaces, test.output).Describe(ist, test.includeInputImg, test.reverse)
+			desc, err := NewChainDescriber(fakeClient, test.namespaces, test.output).Describe(ist, test.includeInputImg, test.reverse)
 			t.Logf("%s: output:\n%s\n\n", test.testName, desc)
 			if err != test.expectedErr {
 				t.Fatalf("%s: error mismatch: expected %v, got %v", test.testName, test.expectedErr, err)
diff --git a/pkg/oc/lib/describe/deployments.go b/pkg/oc/lib/describe/deployments.go
index 8108e5c6d853..f2a1e3f8a3cc 100644
--- a/pkg/oc/lib/describe/deployments.go
+++ b/pkg/oc/lib/describe/deployments.go
@@ -25,7 +25,6 @@ import (
 	appsv1 "github.com/openshift/api/apps/v1"
 	appstypedclient "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
 	"github.com/openshift/origin/pkg/api/legacy"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
 	appsutil "github.com/openshift/origin/pkg/apps/util"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	appsedges "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
@@ -494,19 +493,9 @@ func (d *LatestDeploymentsDescriber) Describe(namespace, name string) (string, e
 	}
 
 	g := genericgraph.New()
-	// TODO: Remove conversion when we move the appsgraph to external types
-	internalConfig := &appsapi.DeploymentConfig{}
-	if err := legacyscheme.Scheme.Convert(config, internalConfig, nil); err != nil {
-		return "", fmt.Errorf("conversion error: %v", err)
-	}
-	dcNode := appsgraph.EnsureDeploymentConfigNode(g, internalConfig)
+	dcNode := appsgraph.EnsureDeploymentConfigNode(g, config)
 	for i := range deployments {
-		// TODO: Remove when kubegraph use external types
-		internalDeployment := &kapi.ReplicationController{}
-		if err := legacyscheme.Scheme.Convert(&deployments[i], internalDeployment, nil); err != nil {
-			return "", fmt.Errorf("conversion error: %v", err)
-		}
-		kubegraph.EnsureReplicationControllerNode(g, internalDeployment)
+		kubegraph.EnsureReplicationControllerNode(g, &deployments[i])
 	}
 	appsedges.AddTriggerDeploymentConfigsEdges(g, dcNode)
 	appsedges.AddDeploymentConfigsDeploymentEdges(g, dcNode)
diff --git a/pkg/oc/lib/describe/describer.go b/pkg/oc/lib/describe/describer.go
index 334fe47a1845..cbf8cb8d7425 100644
--- a/pkg/oc/lib/describe/describer.go
+++ b/pkg/oc/lib/describe/describer.go
@@ -13,6 +13,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/client-go/kubernetes"
 
+	corev1 "k8s.io/api/core/v1"
 	kerrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,6 +37,7 @@ import (
 	"github.com/openshift/api/project"
 	"github.com/openshift/api/quota"
 	"github.com/openshift/api/route"
+	routev1 "github.com/openshift/api/route/v1"
 	"github.com/openshift/api/security"
 	"github.com/openshift/api/template"
 	"github.com/openshift/api/user"
@@ -58,6 +60,7 @@ import (
 	quotaapi "github.com/openshift/origin/pkg/quota/apis/quota"
 	quotaclient "github.com/openshift/origin/pkg/quota/generated/internalclientset/typed/quota/internalversion"
 	routeapi "github.com/openshift/origin/pkg/route/apis/route"
+	routev1conversions "github.com/openshift/origin/pkg/route/apis/route/v1"
 	routeclient "github.com/openshift/origin/pkg/route/generated/internalclientset/typed/route/internalversion"
 	securityapi "github.com/openshift/origin/pkg/security/apis/security"
 	securityclient "github.com/openshift/origin/pkg/security/generated/internalclientset/typed/security/internalversion"
@@ -875,10 +878,14 @@ func (d *RouteDescriber) Describe(namespace, name string, settings kprinters.Des
 				if len(ingress.RouterCanonicalHostname) > 0 {
 					hostName = fmt.Sprintf(" (host %s)", ingress.RouterCanonicalHostname)
 				}
-				switch status, condition := routedisplayhelpers.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status {
-				case kapi.ConditionTrue:
+				external := routev1.RouteIngress{}
+				if err := routev1conversions.Convert_route_RouteIngress_To_v1_RouteIngress(&ingress, &external, nil); err != nil {
+					return err
+				}
+				switch status, condition := routedisplayhelpers.IngressConditionStatus(&external, routev1.RouteAdmitted); status {
+				case corev1.ConditionTrue:
 					fmt.Fprintf(out, "\t  exposed on router %s%s %s ago\n", ingress.RouterName, hostName, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time)))
-				case kapi.ConditionFalse:
+				case corev1.ConditionFalse:
 					fmt.Fprintf(out, "\t  rejected by router %s: %s%s (%s ago)\n", ingress.RouterName, hostName, condition.Reason, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time)))
 					if len(condition.Message) > 0 {
 						fmt.Fprintf(out, "\t    %s\n", condition.Message)
@@ -897,10 +904,14 @@ func (d *RouteDescriber) Describe(namespace, name string, settings kprinters.Des
 			if len(ingress.RouterCanonicalHostname) > 0 {
 				hostName = fmt.Sprintf(" (host %s)", ingress.RouterCanonicalHostname)
 			}
-			switch status, condition := routedisplayhelpers.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status {
-			case kapi.ConditionTrue:
+			external := routev1.RouteIngress{}
+			if err := routev1conversions.Convert_route_RouteIngress_To_v1_RouteIngress(&ingress, &external, nil); err != nil {
+				return err
+			}
+			switch status, condition := routedisplayhelpers.IngressConditionStatus(&external, routev1.RouteAdmitted); status {
+			case corev1.ConditionTrue:
 				fmt.Fprintf(out, "\t%s exposed on router %s %s%s ago\n", ingress.Host, ingress.RouterName, hostName, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time)))
-			case kapi.ConditionFalse:
+			case corev1.ConditionFalse:
 				fmt.Fprintf(out, "\trejected by router %s: %s%s (%s ago)\n", ingress.RouterName, hostName, condition.Reason, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time)))
 				if len(condition.Message) > 0 {
 					fmt.Fprintf(out, "\t  %s\n", condition.Message)
diff --git a/pkg/oc/lib/describe/projectstatus.go b/pkg/oc/lib/describe/projectstatus.go
index 8301e4ec61e2..51501d05c409 100644
--- a/pkg/oc/lib/describe/projectstatus.go
+++ b/pkg/oc/lib/describe/projectstatus.go
@@ -8,33 +8,35 @@ import (
 	"strings"
 	"text/tabwriter"
 
+	kappsv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	corev1 "k8s.io/api/core/v1"
 	kapierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/kubernetes"
+	kappsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
+	autoscalingv1client "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	kapps "k8s.io/kubernetes/pkg/apis/apps"
-	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	kapisext "k8s.io/kubernetes/pkg/apis/extensions"
-	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	kappsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
-	kautoscalingclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion"
-	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
-	kapisextclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
 	deployutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 
 	appsv1 "github.com/openshift/api/apps/v1"
-	appsclient "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
+	buildv1 "github.com/openshift/api/build/v1"
+	imagev1 "github.com/openshift/api/image/v1"
+	projectv1 "github.com/openshift/api/project/v1"
+	routev1 "github.com/openshift/api/route/v1"
+	appsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
+	buildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1"
+	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
+	projectv1client "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1"
+	routev1client "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1"
 	oapi "github.com/openshift/origin/pkg/api"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
 	appsutil "github.com/openshift/origin/pkg/apps/util"
 	buildapi "github.com/openshift/origin/pkg/build/apis/build"
-	buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset/typed/build/internalversion"
-	imageapi "github.com/openshift/origin/pkg/image/apis/image"
-	imageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/typed/image/internalversion"
 	loginerrors "github.com/openshift/origin/pkg/oc/lib/errors"
 	appsedges "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	appsanalysis "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/analysis"
@@ -53,10 +55,6 @@ import (
 	routeanalysis "github.com/openshift/origin/pkg/oc/lib/graph/routegraph/analysis"
 	routegraph "github.com/openshift/origin/pkg/oc/lib/graph/routegraph/nodes"
 	"github.com/openshift/origin/pkg/oc/lib/routedisplayhelpers"
-	projectapi "github.com/openshift/origin/pkg/project/apis/project"
-	projectclient "github.com/openshift/origin/pkg/project/generated/internalclientset/typed/project/internalversion"
-	routeapi "github.com/openshift/origin/pkg/route/apis/route"
-	routeclient "github.com/openshift/origin/pkg/route/generated/internalclientset/typed/route/internalversion"
 	"github.com/openshift/origin/pkg/util/errors"
 	"github.com/openshift/origin/pkg/util/parallel"
 )
@@ -65,15 +63,15 @@ const ForbiddenListWarning = "Forbidden"
 
 // ProjectStatusDescriber generates extended information about a Project
 type ProjectStatusDescriber struct {
-	KubeClient kclientset.Interface
+	KubeClient kubernetes.Interface
 	RESTMapper meta.RESTMapper
 
 	// OpenShift clients
-	ProjectClient projectclient.ProjectInterface
-	BuildClient   buildclient.BuildInterface
-	ImageClient   imageclient.ImageInterface
-	AppsClient    appsclient.AppsV1Interface
-	RouteClient   routeclient.RouteInterface
+	ProjectClient projectv1client.ProjectV1Interface
+	BuildClient   buildv1client.BuildV1Interface
+	ImageClient   imagev1client.ImageV1Interface
+	AppsClient    appsv1client.AppsV1Interface
+	RouteClient   routev1client.RouteV1Interface
 	Server        string
 	Suggest       bool
 
@@ -101,9 +99,9 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set
 		&podLoader{namespace: namespace, lister: d.KubeClient.Core()},
 		&statefulSetLoader{namespace: namespace, lister: d.KubeClient.Apps()},
 		&horizontalPodAutoscalerLoader{namespace: namespace, lister: d.KubeClient.Autoscaling()},
-		&deploymentLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
-		&replicasetLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
-		&daemonsetLoader{namespace: namespace, lister: d.KubeClient.Extensions()},
+		&deploymentLoader{namespace: namespace, lister: d.KubeClient.Apps()},
+		&replicasetLoader{namespace: namespace, lister: d.KubeClient.Apps()},
+		&daemonsetLoader{namespace: namespace, lister: d.KubeClient.Apps()},
 		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
 		// then remove errors.TolerateNotFoundError method.
 		&bcLoader{namespace: namespace, lister: d.BuildClient},
@@ -195,7 +193,7 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 	}
 
 	allNamespaces := namespace == metav1.NamespaceAll
-	var project *projectapi.Project
+	var project *projectv1.Project
 	if !allNamespaces {
 		p, err := d.ProjectClient.Projects().Get(namespace, metav1.GetOptions{})
 		if err != nil {
@@ -208,7 +206,7 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 			if !kapierrors.IsNotFound(err) {
 				return "", err
 			}
-			p = &projectapi.Project{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
+			p = &projectv1.Project{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
 		}
 		project = p
 		f = namespacedFormatter{currentNamespace: namespace}
@@ -700,7 +698,7 @@ func (f namespacedFormatter) ResourceName(obj interface{}) string {
 	}
 }
 
-func describeProjectAndServer(f formatter, project *projectapi.Project, server string) string {
+func describeProjectAndServer(f formatter, project *projectv1.Project, server string) string {
 	projectName := project.Name
 	displayName := project.Annotations[oapi.OpenShiftDisplayName]
 	if len(displayName) == 0 {
@@ -957,12 +955,12 @@ func (e exposedRoutes) Less(i, j int) bool {
 	}
 }
 
-func extractRouteInfo(route *routeapi.Route) (requested bool, other []string, errors []string) {
+func extractRouteInfo(route *routev1.Route) (requested bool, other []string, errors []string) {
 	reasons := sets.NewString()
 	for _, ingress := range route.Status.Ingress {
 		exact := route.Spec.Host == ingress.Host
-		switch status, condition := routedisplayhelpers.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status {
-		case kapi.ConditionFalse:
+		switch status, condition := routedisplayhelpers.IngressConditionStatus(&ingress, routev1.RouteAdmitted); status {
+		case corev1.ConditionFalse:
 			reasons.Insert(condition.Reason)
 		default:
 			if exact {
@@ -975,7 +973,7 @@ func extractRouteInfo(route *routeapi.Route) (requested bool, other []string, er
 	return requested, other, reasons.List()
 }
 
-func describeRouteExposed(host string, route *routeapi.Route, errors bool) string {
+func describeRouteExposed(host string, route *routev1.Route, errors bool) string {
 	var trailer string
 	if errors {
 		trailer = " (!)"
@@ -984,16 +982,16 @@ func describeRouteExposed(host string, route *routeapi.Route, errors bool) strin
 	switch {
 	case route.Spec.TLS == nil:
 		prefix = fmt.Sprintf("http://%s", host)
-	case route.Spec.TLS.Termination == routeapi.TLSTerminationPassthrough:
+	case route.Spec.TLS.Termination == routev1.TLSTerminationPassthrough:
 		prefix = fmt.Sprintf("https://%s (passthrough)", host)
-	case route.Spec.TLS.Termination == routeapi.TLSTerminationReencrypt:
+	case route.Spec.TLS.Termination == routev1.TLSTerminationReencrypt:
 		prefix = fmt.Sprintf("https://%s (reencrypt)", host)
-	case route.Spec.TLS.Termination != routeapi.TLSTerminationEdge:
+	case route.Spec.TLS.Termination != routev1.TLSTerminationEdge:
 		// future proof against other types of TLS termination being added
 		prefix = fmt.Sprintf("https://%s", host)
-	case route.Spec.TLS.InsecureEdgeTerminationPolicy == routeapi.InsecureEdgeTerminationPolicyRedirect:
+	case route.Spec.TLS.InsecureEdgeTerminationPolicy == routev1.InsecureEdgeTerminationPolicyRedirect:
 		prefix = fmt.Sprintf("https://%s (redirects)", host)
-	case route.Spec.TLS.InsecureEdgeTerminationPolicy == routeapi.InsecureEdgeTerminationPolicyAllow:
+	case route.Spec.TLS.InsecureEdgeTerminationPolicy == routev1.InsecureEdgeTerminationPolicyAllow:
 		prefix = fmt.Sprintf("https://%s (and http)", host)
 	default:
 		prefix = fmt.Sprintf("https://%s", host)
@@ -1034,7 +1032,7 @@ func describeRouteInServiceGroup(f formatter, routeNode *routegraph.RouteNode) [
 	return lines
 }
 
-func describeDeploymentConfigTrigger(dc *appsapi.DeploymentConfig) string {
+func describeDeploymentConfigTrigger(dc *appsv1.DeploymentConfig) string {
 	if len(dc.Spec.Triggers) == 0 {
 		return "(manual)"
 	}
@@ -1196,7 +1194,7 @@ func describeAdditionalBuildDetail(build *buildgraph.BuildConfigNode, lastSucces
 	return out
 }
 
-func describeBuildPhase(build *buildapi.Build, t *metav1.Time, parentName string, pushTargetResolved bool) string {
+func describeBuildPhase(build *buildv1.Build, t *metav1.Time, parentName string, pushTargetResolved bool) string {
 	imageStreamFailure := ""
 	// if we're using an image stream and that image stream is the internal registry and that registry doesn't exist
 	if (build.Spec.Output.To != nil) && !pushTargetResolved {
@@ -1228,11 +1226,11 @@ func describeBuildPhase(build *buildapi.Build, t *metav1.Time, parentName string
 		revision = fmt.Sprintf(" - %s", revision)
 	}
 	switch build.Status.Phase {
-	case buildapi.BuildPhaseComplete:
+	case buildv1.BuildPhaseComplete:
 		return fmt.Sprintf("%s succeeded %s ago%s%s", buildIdentification, time, revision, imageStreamFailure)
-	case buildapi.BuildPhaseError:
+	case buildv1.BuildPhaseError:
 		return fmt.Sprintf("%s stopped with an error %s ago%s%s", buildIdentification, time, revision, imageStreamFailure)
-	case buildapi.BuildPhaseFailed:
+	case buildv1.BuildPhaseFailed:
 		return fmt.Sprintf("%s failed %s ago%s%s", buildIdentification, time, revision, imageStreamFailure)
 	default:
 		status := strings.ToLower(string(build.Status.Phase))
@@ -1240,7 +1238,7 @@ func describeBuildPhase(build *buildapi.Build, t *metav1.Time, parentName string
 	}
 }
 
-func describeSourceRevision(rev *buildapi.SourceRevision) string {
+func describeSourceRevision(rev *buildv1.SourceRevision) string {
 	if rev == nil {
 		return ""
 	}
@@ -1263,7 +1261,7 @@ func describeSourceRevision(rev *buildapi.SourceRevision) string {
 	}
 }
 
-func describeSourceControlUser(user buildapi.SourceControlUser) string {
+func describeSourceControlUser(user buildv1.SourceControlUser) string {
 	if len(user.Name) == 0 {
 		return user.Email
 	}
@@ -1273,7 +1271,7 @@ func describeSourceControlUser(user buildapi.SourceControlUser) string {
 	return fmt.Sprintf("%s <%s>", user.Name, user.Email)
 }
 
-func buildTimestamp(build *buildapi.Build) metav1.Time {
+func buildTimestamp(build *buildv1.Build) metav1.Time {
 	if build == nil {
 		return metav1.Time{}
 	}
@@ -1286,7 +1284,7 @@ func buildTimestamp(build *buildapi.Build) metav1.Time {
 	return build.CreationTimestamp
 }
 
-func describeSourceInPipeline(source *buildapi.BuildSource) (string, bool) {
+func describeSourceInPipeline(source *buildv1.BuildSource) (string, bool) {
 	switch {
 	case source.Git != nil:
 		if len(source.Git.Ref) == 0 {
@@ -1322,13 +1320,13 @@ func describeDeployments(f formatter, dNode *kubegraph.DeploymentNode, activeDep
 	return out
 }
 
-func describeDeploymentStatus(rs *kapisext.ReplicaSet, revision int64, first bool, restartCount int32) string {
+func describeDeploymentStatus(rs *kappsv1.ReplicaSet, revision int64, first bool, restartCount int32) string {
 	timeAt := strings.ToLower(formatRelativeTime(rs.CreationTimestamp.Time))
 	replicaSetRevision, _ := deployutil.Revision(rs)
 	if replicaSetRevision == revision {
-		return fmt.Sprintf("deployment #%d running for %s%s", replicaSetRevision, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, rs.Spec.Replicas, false, restartCount))
+		return fmt.Sprintf("deployment #%d running for %s%s", replicaSetRevision, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, *rs.Spec.Replicas, false, restartCount))
 	} else {
-		return fmt.Sprintf("deployment #%d deployed %s ago%s", replicaSetRevision, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, rs.Spec.Replicas, first, restartCount))
+		return fmt.Sprintf("deployment #%d deployed %s ago%s", replicaSetRevision, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, *rs.Spec.Replicas, first, restartCount))
 	}
 }
 
@@ -1371,7 +1369,7 @@ func describeDeploymentConfigDeployments(f formatter, dcNode *appsgraph.Deployme
 	return out
 }
 
-func describeDeploymentConfigDeploymentStatus(rc *kapi.ReplicationController, first, test bool, restartCount int32) string {
+func describeDeploymentConfigDeploymentStatus(rc *corev1.ReplicationController, first, test bool, restartCount int32) string {
 	timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time))
 	status := appsutil.DeploymentStatusFor(rc)
 	version := appsutil.DeploymentVersionFor(rc)
@@ -1387,49 +1385,49 @@ func describeDeploymentConfigDeploymentStatus(rc *kapi.ReplicationController, fi
 			reason = fmt.Sprintf(": %s", reason)
 		}
 		// TODO: encode fail time in the rc
-		return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, false, restartCount))
+		return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, *rc.Spec.Replicas, false, restartCount))
 	case appsutil.DeploymentStatusComplete:
 		// TODO: pod status output
 		if test {
 			return fmt.Sprintf("test deployment #%d deployed %s ago", version, timeAt)
 		}
-		return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, first, restartCount))
+		return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, *rc.Spec.Replicas, first, restartCount))
 	case appsutil.DeploymentStatusRunning:
 		format := "deployment #%d running%s for %s%s"
 		if test {
 			format = "test deployment #%d running%s for %s%s"
 		}
-		return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, false, restartCount))
+		return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, *rc.Spec.Replicas, false, restartCount))
 	default:
-		return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, false, restartCount))
+		return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, *rc.Spec.Replicas, false, restartCount))
 	}
 }
 
-func describeDeploymentConfigRolloutStatus(d *kapisext.Deployment) string {
+func describeDeploymentConfigRolloutStatus(d *kappsv1.Deployment) string {
 	timeAt := strings.ToLower(formatRelativeTime(d.CreationTimestamp.Time))
-	return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(d.Status.Replicas), int32(d.Status.Replicas), int32(d.Spec.Replicas), false, 0))
+	return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(d.Status.Replicas), int32(d.Status.Replicas), *d.Spec.Replicas, false, 0))
 }
 
-func describeStatefulSetStatus(p *kapps.StatefulSet) string {
+func describeStatefulSetStatus(p *kappsv1.StatefulSet) string {
 	timeAt := strings.ToLower(formatRelativeTime(p.CreationTimestamp.Time))
 	// TODO: Replace first argument in describePodSummaryInline with ReadyReplicas once that's a thing for pet sets.
-	return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(p.Status.Replicas), int32(p.Status.Replicas), int32(p.Spec.Replicas), false, 0))
+	return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(p.Status.Replicas), int32(p.Status.Replicas), *p.Spec.Replicas, false, 0))
 }
 
-func describeDaemonSetStatus(ds *kapisext.DaemonSet) string {
+func describeDaemonSetStatus(ds *kappsv1.DaemonSet) string {
 	timeAt := strings.ToLower(formatRelativeTime(ds.CreationTimestamp.Time))
 	replicaSetRevision := ds.Generation
 	return fmt.Sprintf("generation #%d running for %s%s", replicaSetRevision, timeAt, describePodSummaryInline(ds.Status.NumberReady, ds.Status.NumberAvailable, ds.Status.DesiredNumberScheduled, false, 0))
 }
 
-func describeRCStatus(rc *kapi.ReplicationController) string {
+func describeRCStatus(rc *corev1.ReplicationController) string {
 	timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time))
-	return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, false, 0))
+	return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, *rc.Spec.Replicas, false, 0))
 }
 
-func describeRSStatus(rs *kapisext.ReplicaSet) string {
+func describeRSStatus(rs *kappsv1.ReplicaSet) string {
 	timeAt := strings.ToLower(formatRelativeTime(rs.CreationTimestamp.Time))
-	return fmt.Sprintf("rs/%s created %s ago%s", rs.Name, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, rs.Spec.Replicas, false, 0))
+	return fmt.Sprintf("rs/%s created %s ago%s", rs.Name, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, *rs.Spec.Replicas, false, 0))
 }
 
 func describePodSummaryInline(ready, actual, requested int32, includeEmpty bool, restartCount int32) string {
@@ -1468,13 +1466,13 @@ func describePodSummary(ready, requested int32, includeEmpty bool, restartCount
 	return fmt.Sprintf("%d/%d pods", ready, requested) + restartWarn
 }
 
-func describeDeploymentConfigTriggers(config *appsapi.DeploymentConfig) (string, bool) {
+func describeDeploymentConfigTriggers(config *appsv1.DeploymentConfig) (string, bool) {
 	hasConfig, hasImage := false, false
 	for _, t := range config.Spec.Triggers {
 		switch t.Type {
-		case appsapi.DeploymentTriggerOnConfigChange:
+		case appsv1.DeploymentTriggerOnConfigChange:
 			hasConfig = true
-		case appsapi.DeploymentTriggerOnImageChange:
+		case appsv1.DeploymentTriggerOnImageChange:
 			hasImage = true
 		}
 	}
@@ -1500,7 +1498,7 @@ func describeServiceInServiceGroup(f formatter, svc graphview.ServiceGroup, expo
 		return append([]string{fmt.Sprintf("%s (%s)", exposed[0], f.ResourceName(svc.Service))}, exposed[1:]...)
 	case len(exposed) == 1:
 		return []string{fmt.Sprintf("%s (%s)", exposed[0], f.ResourceName(svc.Service))}
-	case spec.Type == kapi.ServiceTypeNodePort:
+	case spec.Type == corev1.ServiceTypeNodePort:
 		return []string{fmt.Sprintf("%s (all nodes)%s", f.ResourceName(svc.Service), port)}
 	case ip == "None":
 		return []string{fmt.Sprintf("%s (headless)%s", f.ResourceName(svc.Service), port)}
@@ -1513,9 +1511,9 @@ func describeServiceInServiceGroup(f formatter, svc graphview.ServiceGroup, expo
 	}
 }
 
-func portOrNodePort(spec kapi.ServiceSpec, port kapi.ServicePort) string {
+func portOrNodePort(spec corev1.ServiceSpec, port corev1.ServicePort) string {
 	switch {
-	case spec.Type != kapi.ServiceTypeNodePort:
+	case spec.Type != corev1.ServiceTypeNodePort:
 		return strconv.Itoa(int(port.Port))
 	case port.NodePort == 0:
 		return "<initializing>"
@@ -1524,14 +1522,14 @@ func portOrNodePort(spec kapi.ServiceSpec, port kapi.ServicePort) string {
 	}
 }
 
-func describeServicePorts(spec kapi.ServiceSpec) string {
+func describeServicePorts(spec corev1.ServiceSpec) string {
 	switch len(spec.Ports) {
 	case 0:
 		return " no ports"
 
 	case 1:
 		port := portOrNodePort(spec, spec.Ports[0])
-		if spec.Ports[0].TargetPort.String() == "0" || spec.ClusterIP == kapi.ClusterIPNone || port == spec.Ports[0].TargetPort.String() {
+		if spec.Ports[0].TargetPort.String() == "0" || spec.ClusterIP == corev1.ClusterIPNone || port == spec.Ports[0].TargetPort.String() {
 			return fmt.Sprintf(":%s", port)
 		}
 		return fmt.Sprintf(":%s -> %s", port, spec.Ports[0].TargetPort.String())
@@ -1540,7 +1538,7 @@ func describeServicePorts(spec kapi.ServiceSpec) string {
 		pairs := []string{}
 		for _, port := range spec.Ports {
 			externalPort := portOrNodePort(spec, port)
-			if port.TargetPort.String() == "0" || spec.ClusterIP == kapi.ClusterIPNone {
+			if port.TargetPort.String() == "0" || spec.ClusterIP == corev1.ClusterIPNone {
 				pairs = append(pairs, externalPort)
 				continue
 			}
@@ -1558,7 +1556,7 @@ func filterBoringPods(pods []graphview.Pod) ([]graphview.Pod, error) {
 	monopods := []graphview.Pod{}
 
 	for _, pod := range pods {
-		actualPod, ok := pod.Pod.Object().(*kapi.Pod)
+		actualPod, ok := pod.Pod.Object().(*corev1.Pod)
 		if !ok {
 			continue
 		}
@@ -1568,7 +1566,7 @@ func filterBoringPods(pods []graphview.Pod) ([]graphview.Pod, error) {
 		}
 		_, isDeployerPod := meta.GetLabels()[appsutil.DeployerPodForDeploymentLabel]
 		_, isBuilderPod := meta.GetAnnotations()[buildapi.BuildAnnotation]
-		isFinished := actualPod.Status.Phase == kapi.PodSucceeded || actualPod.Status.Phase == kapi.PodFailed
+		isFinished := actualPod.Status.Phase == corev1.PodSucceeded || actualPod.Status.Phase == corev1.PodFailed
 		if isDeployerPod || isBuilderPod || isFinished {
 			continue
 		}
@@ -1588,8 +1586,8 @@ type GraphLoader interface {
 
 type rcLoader struct {
 	namespace string
-	lister    kcoreclient.ReplicationControllersGetter
-	items     []kapi.ReplicationController
+	lister    corev1client.ReplicationControllersGetter
+	items     []corev1.ReplicationController
 }
 
 func (l *rcLoader) Load() error {
@@ -1612,8 +1610,8 @@ func (l *rcLoader) AddToGraph(g osgraph.Graph) error {
 
 type serviceLoader struct {
 	namespace string
-	lister    kcoreclient.ServicesGetter
-	items     []kapi.Service
+	lister    corev1client.ServicesGetter
+	items     []corev1.Service
 }
 
 func (l *serviceLoader) Load() error {
@@ -1636,8 +1634,8 @@ func (l *serviceLoader) AddToGraph(g osgraph.Graph) error {
 
 type podLoader struct {
 	namespace string
-	lister    kcoreclient.PodsGetter
-	items     []kapi.Pod
+	lister    corev1client.PodsGetter
+	items     []corev1.Pod
 }
 
 func (l *podLoader) Load() error {
@@ -1660,8 +1658,8 @@ func (l *podLoader) AddToGraph(g osgraph.Graph) error {
 
 type statefulSetLoader struct {
 	namespace string
-	lister    kappsclient.StatefulSetsGetter
-	items     []kapps.StatefulSet
+	lister    kappsv1client.StatefulSetsGetter
+	items     []kappsv1.StatefulSet
 }
 
 func (l *statefulSetLoader) Load() error {
@@ -1684,8 +1682,8 @@ func (l *statefulSetLoader) AddToGraph(g osgraph.Graph) error {
 
 type horizontalPodAutoscalerLoader struct {
 	namespace string
-	lister    kautoscalingclient.HorizontalPodAutoscalersGetter
-	items     []autoscaling.HorizontalPodAutoscaler
+	lister    autoscalingv1client.HorizontalPodAutoscalersGetter
+	items     []autoscalingv1.HorizontalPodAutoscaler
 }
 
 func (l *horizontalPodAutoscalerLoader) Load() error {
@@ -1708,8 +1706,8 @@ func (l *horizontalPodAutoscalerLoader) AddToGraph(g osgraph.Graph) error {
 
 type deploymentLoader struct {
 	namespace string
-	lister    kapisextclient.DeploymentsGetter
-	items     []kapisext.Deployment
+	lister    kappsv1client.DeploymentsGetter
+	items     []kappsv1.Deployment
 }
 
 func (l *deploymentLoader) Load() error {
@@ -1732,8 +1730,8 @@ func (l *deploymentLoader) AddToGraph(g osgraph.Graph) error {
 
 type daemonsetLoader struct {
 	namespace string
-	lister    kapisextclient.DaemonSetsGetter
-	items     []kapisext.DaemonSet
+	lister    kappsv1client.DaemonSetsGetter
+	items     []kappsv1.DaemonSet
 }
 
 func (l *daemonsetLoader) Load() error {
@@ -1756,8 +1754,8 @@ func (l *daemonsetLoader) AddToGraph(g osgraph.Graph) error {
 
 type replicasetLoader struct {
 	namespace string
-	lister    kapisextclient.ReplicaSetsGetter
-	items     []kapisext.ReplicaSet
+	lister    kappsv1client.ReplicaSetsGetter
+	items     []kappsv1.ReplicaSet
 }
 
 func (l *replicasetLoader) Load() error {
@@ -1780,8 +1778,8 @@ func (l *replicasetLoader) AddToGraph(g osgraph.Graph) error {
 
 type serviceAccountLoader struct {
 	namespace string
-	lister    kcoreclient.ServiceAccountsGetter
-	items     []kapi.ServiceAccount
+	lister    corev1client.ServiceAccountsGetter
+	items     []corev1.ServiceAccount
 }
 
 func (l *serviceAccountLoader) Load() error {
@@ -1804,8 +1802,8 @@ func (l *serviceAccountLoader) AddToGraph(g osgraph.Graph) error {
 
 type secretLoader struct {
 	namespace string
-	lister    kcoreclient.SecretsGetter
-	items     []kapi.Secret
+	lister    corev1client.SecretsGetter
+	items     []corev1.Secret
 }
 
 func (l *secretLoader) Load() error {
@@ -1828,8 +1826,8 @@ func (l *secretLoader) AddToGraph(g osgraph.Graph) error {
 
 type pvcLoader struct {
 	namespace string
-	lister    kcoreclient.PersistentVolumeClaimsGetter
-	items     []kapi.PersistentVolumeClaim
+	lister    corev1client.PersistentVolumeClaimsGetter
+	items     []corev1.PersistentVolumeClaim
 }
 
 func (l *pvcLoader) Load() error {
@@ -1852,8 +1850,8 @@ func (l *pvcLoader) AddToGraph(g osgraph.Graph) error {
 
 type isLoader struct {
 	namespace string
-	lister    imageclient.ImageStreamsGetter
-	items     []imageapi.ImageStream
+	lister    imagev1client.ImageStreamsGetter
+	items     []imagev1.ImageStream
 }
 
 func (l *isLoader) Load() error {
@@ -1877,7 +1875,7 @@ func (l *isLoader) AddToGraph(g osgraph.Graph) error {
 
 type dcLoader struct {
 	namespace string
-	lister    appsclient.DeploymentConfigsGetter
+	lister    appsv1client.DeploymentConfigsGetter
 	items     []appsv1.DeploymentConfig
 }
 
@@ -1893,7 +1891,7 @@ func (l *dcLoader) Load() error {
 
 func (l *dcLoader) AddToGraph(g osgraph.Graph) error {
 	for i := range l.items {
-		internalConfig := &appsapi.DeploymentConfig{}
+		internalConfig := &appsv1.DeploymentConfig{}
 		if err := legacyscheme.Scheme.Convert(&l.items[i], internalConfig, nil); err != nil {
 			return err
 		}
@@ -1905,8 +1903,8 @@ func (l *dcLoader) AddToGraph(g osgraph.Graph) error {
 
 type bcLoader struct {
 	namespace string
-	lister    buildclient.BuildConfigsGetter
-	items     []buildapi.BuildConfig
+	lister    buildv1client.BuildConfigsGetter
+	items     []buildv1.BuildConfig
 }
 
 func (l *bcLoader) Load() error {
@@ -1929,8 +1927,8 @@ func (l *bcLoader) AddToGraph(g osgraph.Graph) error {
 
 type buildLoader struct {
 	namespace string
-	lister    buildclient.BuildsGetter
-	items     []buildapi.Build
+	lister    buildv1client.BuildsGetter
+	items     []buildv1.Build
 }
 
 func (l *buildLoader) Load() error {
@@ -1953,8 +1951,8 @@ func (l *buildLoader) AddToGraph(g osgraph.Graph) error {
 
 type routeLoader struct {
 	namespace string
-	lister    routeclient.RoutesGetter
-	items     []routeapi.Route
+	lister    routev1client.RoutesGetter
+	items     []routev1.Route
 }
 
 func (l *routeLoader) Load() error {
diff --git a/pkg/oc/lib/describe/projectstatus_test.go b/pkg/oc/lib/describe/projectstatus_test.go
index da67ad78af22..eff0593f6a75 100644
--- a/pkg/oc/lib/describe/projectstatus_test.go
+++ b/pkg/oc/lib/describe/projectstatus_test.go
@@ -2,38 +2,44 @@ package describe
 
 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"strings"
 	"testing"
 	"text/tabwriter"
 	"time"
 
-	appsv1 "github.com/openshift/api/apps/v1"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/yaml"
+	fakekubernetes "k8s.io/client-go/kubernetes/fake"
+	kubernetesscheme "k8s.io/client-go/kubernetes/scheme"
 	clientgotesting "k8s.io/client-go/testing"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	kubefakeclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
-	kubeclientscheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme"
+	"k8s.io/kubernetes/pkg/kubectl/scheme"
 
-	appsfakeclient "github.com/openshift/client-go/apps/clientset/versioned/fake"
+	"github.com/openshift/api"
+	appsv1 "github.com/openshift/api/apps/v1"
+	buildv1 "github.com/openshift/api/build/v1"
+	imagev1 "github.com/openshift/api/image/v1"
+	projectv1 "github.com/openshift/api/project/v1"
+	routev1 "github.com/openshift/api/route/v1"
+	fakeappsclient "github.com/openshift/client-go/apps/clientset/versioned/fake"
+	fakeappsv1client "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1/fake"
+	fakebuildclient "github.com/openshift/client-go/build/clientset/versioned/fake"
+	fakebuildv1client "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake"
+	fakeimageclient "github.com/openshift/client-go/image/clientset/versioned/fake"
+	fakeimagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake"
+	fakeprojectclient "github.com/openshift/client-go/project/clientset/versioned/fake"
+	fakeprojectv1client "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1/fake"
+	fakerouteclient "github.com/openshift/client-go/route/clientset/versioned/fake"
+	fakeroutev1client "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake"
 	oapi "github.com/openshift/origin/pkg/api"
-	buildfakeclient "github.com/openshift/origin/pkg/build/generated/internalclientset/fake"
-	buildclientscheme "github.com/openshift/origin/pkg/build/generated/internalclientset/scheme"
-	imagefakeclient "github.com/openshift/origin/pkg/image/generated/internalclientset/fake"
-	imageclientscheme "github.com/openshift/origin/pkg/image/generated/internalclientset/scheme"
+	"github.com/openshift/origin/pkg/api/install"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	projectapi "github.com/openshift/origin/pkg/project/apis/project"
-	projectfakeclient "github.com/openshift/origin/pkg/project/generated/internalclientset/fake"
-	projectclientscheme "github.com/openshift/origin/pkg/project/generated/internalclientset/scheme"
-	routefakeclient "github.com/openshift/origin/pkg/route/generated/internalclientset/fake"
-	routeclientscheme "github.com/openshift/origin/pkg/route/generated/internalclientset/scheme"
 )
 
 func mustParseTime(t string) time.Time {
@@ -57,7 +63,7 @@ func TestProjectStatus(t *testing.T) {
 		},
 		"empty project with display name": {
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "example",
 						Namespace: "",
@@ -76,7 +82,7 @@ func TestProjectStatus(t *testing.T) {
 		"empty service": {
 			File: "k8s-service-with-nothing.json",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -91,7 +97,7 @@ func TestProjectStatus(t *testing.T) {
 		"service with RC": {
 			File: "k8s-unserviced-rc.json",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -107,7 +113,7 @@ func TestProjectStatus(t *testing.T) {
 		"external name service": {
 			File: "external-name-service.json",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -121,7 +127,7 @@ func TestProjectStatus(t *testing.T) {
 		"rc with unmountable and missing secrets": {
 			File: "bad_secret_with_just_rc.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -136,7 +142,7 @@ func TestProjectStatus(t *testing.T) {
 		"dueling rcs": {
 			File: "dueling-rcs.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "dueling-rc", Namespace: ""},
 				},
 			},
@@ -149,7 +155,7 @@ func TestProjectStatus(t *testing.T) {
 		"service with pod": {
 			File: "service-with-pod.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -164,7 +170,7 @@ func TestProjectStatus(t *testing.T) {
 		"build chains": {
 			File: "build-chains.json",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -176,7 +182,7 @@ func TestProjectStatus(t *testing.T) {
 		"scheduled image stream": {
 			File: "prereq-image-present-with-sched.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -188,7 +194,7 @@ func TestProjectStatus(t *testing.T) {
 		"standalone rc": {
 			File: "bare-rc.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -202,7 +208,7 @@ func TestProjectStatus(t *testing.T) {
 		"unstarted build": {
 			File: "new-project-no-build.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -221,7 +227,7 @@ func TestProjectStatus(t *testing.T) {
 		"unpushable build": {
 			File: "unpushable-build.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -233,7 +239,7 @@ func TestProjectStatus(t *testing.T) {
 		"bare-bc-can-push": {
 			File: "bare-bc-can-push.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -249,7 +255,7 @@ func TestProjectStatus(t *testing.T) {
 		"cyclical build": {
 			File: "circular.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -263,7 +269,7 @@ func TestProjectStatus(t *testing.T) {
 		"running build": {
 			File: "new-project-one-build.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -282,7 +288,7 @@ func TestProjectStatus(t *testing.T) {
 		"a/b test DeploymentConfig": {
 			File: "new-project-two-deployment-configs.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -302,7 +308,7 @@ func TestProjectStatus(t *testing.T) {
 		"with real deployments": {
 			File: "new-project-deployed-app.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -334,7 +340,7 @@ func TestProjectStatus(t *testing.T) {
 			File:  "deployment.yaml",
 			ErrFn: func(err error) bool { return err == nil },
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -350,7 +356,7 @@ func TestProjectStatus(t *testing.T) {
 		"with stateful sets": {
 			File: "statefulset.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -367,7 +373,7 @@ func TestProjectStatus(t *testing.T) {
 		"restarting pod": {
 			File: "restarting-pod.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -382,7 +388,7 @@ func TestProjectStatus(t *testing.T) {
 		"cross namespace reference": {
 			File: "different-project-image-deployment.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -396,7 +402,7 @@ func TestProjectStatus(t *testing.T) {
 		"monopod": {
 			File: "k8s-lonely-pod.json",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -410,7 +416,7 @@ func TestProjectStatus(t *testing.T) {
 		"deploys single pod": {
 			File: "simple-deployment.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -424,7 +430,7 @@ func TestProjectStatus(t *testing.T) {
 		"deployment with unavailable pods": {
 			File: "available-deployment.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -438,7 +444,7 @@ func TestProjectStatus(t *testing.T) {
 		"standalone daemonset": {
 			File: "rollingupdate-daemonset.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -452,7 +458,7 @@ func TestProjectStatus(t *testing.T) {
 		"hpa non-missing scaleref": {
 			File: "hpa-with-scale-ref.yaml",
 			Extra: []runtime.Object{
-				&projectapi.Project{
+				&projectv1.Project{
 					ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""},
 				},
 			},
@@ -466,7 +472,17 @@ func TestProjectStatus(t *testing.T) {
 	defer func() { timeNowFn = oldTimeFn }()
 
 	appsScheme := runtime.NewScheme()
-	appsapi.Install(appsScheme)
+	appsv1.Install(appsScheme)
+	buildScheme := runtime.NewScheme()
+	buildv1.Install(buildScheme)
+	imageScheme := runtime.NewScheme()
+	imagev1.Install(imageScheme)
+	projectScheme := runtime.NewScheme()
+	projectv1.Install(projectScheme)
+	routeScheme := runtime.NewScheme()
+	routev1.Install(routeScheme)
+	kubeScheme := runtime.NewScheme()
+	kubernetesscheme.AddToScheme(kubeScheme)
 
 	for k, test := range testCases {
 		t.Run(k, func(t *testing.T) {
@@ -480,8 +496,7 @@ func TestProjectStatus(t *testing.T) {
 			if len(test.File) > 0 {
 				// Load data from a folder dedicated to mock data, which is never loaded into the API during tests
 				var err error
-				objs, err = readObjectsFromPath("../../../../pkg/oc/lib/graph/genericgraph/test/"+test.File, "example",
-					legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...), legacyscheme.Scheme)
+				objs, err = readObjectsFromPath("../../../../pkg/oc/lib/graph/genericgraph/test/"+test.File, "example")
 				if err != nil {
 					t.Errorf("%s: unexpected error: %v", k, err)
 				}
@@ -490,41 +505,26 @@ func TestProjectStatus(t *testing.T) {
 				objs = append(objs, o)
 			}
 
-			kc := kubefakeclient.NewSimpleClientset(filterByScheme(kubeclientscheme.Scheme, objs...)...)
-			projectClient := projectfakeclient.NewSimpleClientset(filterByScheme(projectclientscheme.Scheme, objs...)...)
-			buildClient := buildfakeclient.NewSimpleClientset(filterByScheme(buildclientscheme.Scheme, objs...)...)
-			imageClient := imagefakeclient.NewSimpleClientset(filterByScheme(imageclientscheme.Scheme, objs...)...)
-
-			appsInternalObjects := filterByScheme(appsScheme, objs...)
-			appsExternalObjects := []runtime.Object{}
-			for _, obj := range appsInternalObjects {
-				dcExternal := &appsv1.DeploymentConfig{}
-				_, ok := obj.(*appsapi.DeploymentConfig)
-				if !ok {
-					continue
-				}
-				if err := legacyscheme.Scheme.Convert(obj, dcExternal, nil); err != nil {
-					panic(err)
-				}
-				appsExternalObjects = append(appsExternalObjects, dcExternal)
-			}
-			appsClient := appsfakeclient.NewSimpleClientset(appsExternalObjects...)
-
-			routeClient := routefakeclient.NewSimpleClientset(filterByScheme(routeclientscheme.Scheme, objs...)...)
+			kc := fakekubernetes.NewSimpleClientset(filterByScheme(kubeScheme, objs...)...)
+			projectClient := &fakeprojectv1client.FakeProjectV1{Fake: &(fakeprojectclient.NewSimpleClientset(filterByScheme(projectScheme, objs...)...).Fake)}
+			buildClient := &fakebuildv1client.FakeBuildV1{Fake: &(fakebuildclient.NewSimpleClientset(filterByScheme(buildScheme, objs...)...).Fake)}
+			imageClient := &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset(filterByScheme(imageScheme, objs...)...).Fake)}
+			appsClient := &fakeappsv1client.FakeAppsV1{Fake: &(fakeappsclient.NewSimpleClientset(filterByScheme(appsScheme, objs...)...).Fake)}
+			routeClient := &fakeroutev1client.FakeRouteV1{Fake: &(fakerouteclient.NewSimpleClientset(filterByScheme(routeScheme, objs...)...).Fake)}
 
 			d := ProjectStatusDescriber{
 				KubeClient:                  kc,
-				ProjectClient:               projectClient.Project(),
-				BuildClient:                 buildClient.Build(),
-				ImageClient:                 imageClient.Image(),
-				AppsClient:                  appsClient.Apps(),
-				RouteClient:                 routeClient.Route(),
+				ProjectClient:               projectClient,
+				BuildClient:                 buildClient,
+				ImageClient:                 imageClient,
+				AppsClient:                  appsClient,
+				RouteClient:                 routeClient,
 				Server:                      "https://example.com:8443",
 				Suggest:                     true,
 				CommandBaseName:             "oc",
 				LogsCommandName:             "oc logs -p",
 				SecurityPolicyCommandFormat: "policycommand %s %s",
-				RESTMapper:                  testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
+				RESTMapper:                  testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme),
 			}
 			t.Logf("describing %q ...", test.File)
 			out, err := d.Describe("example", "")
@@ -564,26 +564,26 @@ func TestProjectStatusErrors(t *testing.T) {
 		},
 	}
 	for k, test := range testCases {
-		projectClient := projectfakeclient.NewSimpleClientset()
-		buildClient := buildfakeclient.NewSimpleClientset()
-		imageClient := imagefakeclient.NewSimpleClientset()
-		routeClient := routefakeclient.NewSimpleClientset()
-		appsClient := appsfakeclient.NewSimpleClientset()
+		projectClient := &fakeprojectv1client.FakeProjectV1{Fake: &(fakeprojectclient.NewSimpleClientset().Fake)}
+		buildClient := &fakebuildv1client.FakeBuildV1{Fake: &(fakebuildclient.NewSimpleClientset().Fake)}
+		imageClient := &fakeimagev1client.FakeImageV1{Fake: &(fakeimageclient.NewSimpleClientset().Fake)}
+		routeClient := &fakeroutev1client.FakeRouteV1{Fake: &(fakerouteclient.NewSimpleClientset().Fake)}
+		appsClient := &fakeappsv1client.FakeAppsV1{Fake: &(fakeappsclient.NewSimpleClientset().Fake)}
 		projectClient.PrependReactor("*", "*", func(_ clientgotesting.Action) (bool, runtime.Object, error) {
 			return true, nil, test.Err
 		})
-		kc := kubefakeclient.NewSimpleClientset()
+		kc := fakekubernetes.NewSimpleClientset()
 		kc.PrependReactor("*", "*", func(action clientgotesting.Action) (bool, runtime.Object, error) {
 			return true, nil, test.Err
 		})
 
 		d := ProjectStatusDescriber{
 			KubeClient:                  kc,
-			ProjectClient:               projectClient.Project(),
-			BuildClient:                 buildClient.Build(),
-			ImageClient:                 imageClient.Image(),
-			AppsClient:                  appsClient.Apps(),
-			RouteClient:                 routeClient.Route(),
+			ProjectClient:               projectClient,
+			BuildClient:                 buildClient,
+			ImageClient:                 imageClient,
+			AppsClient:                  appsClient,
+			RouteClient:                 routeClient,
 			Server:                      "https://example.com:8443",
 			Suggest:                     true,
 			CommandBaseName:             "oc",
@@ -658,25 +658,32 @@ func TestPrintMarkerSuggestions(t *testing.T) {
 	}
 }
 
-// ReadObjectsFromPath reads objects from the specified file for testing.
-func readObjectsFromPath(path, namespace string, decoder runtime.Decoder, typer runtime.ObjectTyper) ([]runtime.Object, error) {
+// readObjectsFromPath reads objects from the specified file for testing.
+func readObjectsFromPath(path, namespace string) ([]runtime.Object, error) {
 	data, err := ioutil.ReadFile(path)
 	if err != nil {
 		return nil, err
 	}
-	data, err = yaml.ToJSON(data)
-	if err != nil {
-		return nil, err
-	}
+	// Create a scheme with only the types we care about, also to ensure we
+	// are not messing with the built-in schemes.
+	// We need to perform roundtripping to invoke defaulting, just deserializing
+	// files is not sufficient here.
+	scheme := runtime.NewScheme()
+	kubernetesscheme.AddToScheme(scheme)
+	api.Install(scheme)
+	install.InstallInternalKube(scheme)
+	install.InstallInternalOpenShift(scheme)
+	codecs := serializer.NewCodecFactory(scheme)
+	decoder := codecs.UniversalDecoder()
 	obj, err := runtime.Decode(decoder, data)
 	if err != nil {
 		return nil, err
 	}
 	if !meta.IsListType(obj) {
-		if err := setNamespace(typer, obj, namespace); err != nil {
+		if err := setNamespace(scheme, obj, namespace); err != nil {
 			return nil, err
 		}
-		return []runtime.Object{obj}, nil
+		return convertToExternal(scheme, []runtime.Object{obj})
 	}
 	list, err := meta.ExtractList(obj)
 	if err != nil {
@@ -687,23 +694,46 @@ func readObjectsFromPath(path, namespace string, decoder runtime.Decoder, typer
 		return nil, errs[0]
 	}
 	for _, o := range list {
-		if err := setNamespace(typer, o, namespace); err != nil {
+		if err := setNamespace(scheme, o, namespace); err != nil {
+			return nil, err
+		}
+	}
+	return convertToExternal(scheme, list)
+}
+
+func convertToExternal(scheme *runtime.Scheme, objs []runtime.Object) ([]runtime.Object, error) {
+	result := make([]runtime.Object, 0, len(objs))
+	for _, obj := range objs {
+		gvks, _, err := scheme.ObjectKinds(obj)
+		if err != nil {
+			return nil, err
+		}
+		if len(gvks) == 0 {
+			return nil, fmt.Errorf("Unknown GroupVersionKind for %#v", obj)
+		}
+		gvs := scheme.PrioritizedVersionsForGroup(gvks[0].Group)
+		if len(gvs) == 0 {
+			return nil, fmt.Errorf("Unknown GroupVersion for %#v", obj)
+		}
+		ext, err := scheme.ConvertToVersion(obj, gvs[0])
+		if err != nil {
 			return nil, err
 		}
+		result = append(result, ext)
 	}
-	return list, nil
+	return result, nil
 }
 
-func setNamespace(typer runtime.ObjectTyper, obj runtime.Object, namespace string) error {
+func setNamespace(scheme *runtime.Scheme, obj runtime.Object, namespace string) error {
 	itemMeta, err := meta.Accessor(obj)
 	if err != nil {
 		return err
 	}
-	gvks, _, err := typer.ObjectKinds(obj)
+	gvks, _, err := scheme.ObjectKinds(obj)
 	if err != nil {
 		return err
 	}
-	mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
+	mapper := testrestmapper.TestOnlyStaticRESTMapper(scheme)
 	mapping, err := mapper.RESTMappings(gvks[0].GroupKind(), gvks[0].Version)
 	if err != nil {
 		return err
diff --git a/pkg/oc/lib/graph/appsgraph/analysis/dc.go b/pkg/oc/lib/graph/appsgraph/analysis/dc.go
index 2800339ac3e2..6a844363b872 100644
--- a/pkg/oc/lib/graph/appsgraph/analysis/dc.go
+++ b/pkg/oc/lib/graph/appsgraph/analysis/dc.go
@@ -5,10 +5,10 @@ import (
 
 	"github.com/gonum/graph"
 
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 	kdeplutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 
-	buildinternalhelpers "github.com/openshift/origin/pkg/build/apis/build/internal_helpers"
+	buildutil "github.com/openshift/origin/pkg/build/util"
 	appsedges "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	buildedges "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph"
@@ -72,7 +72,7 @@ func ictMarker(g osgraph.Graph, f osgraph.Namer, dcNode *appsgraph.DeploymentCon
 
 			for _, bcNode := range buildedges.BuildConfigsForTag(g, istNode) {
 				// Avoid warning for the dc image trigger in case there is a build in flight.
-				if latestBuild := buildedges.GetLatestBuild(g, bcNode); latestBuild != nil && !buildinternalhelpers.IsBuildComplete(
+				if latestBuild := buildedges.GetLatestBuild(g, bcNode); latestBuild != nil && !buildutil.IsBuildComplete(
 					latestBuild.
 						Build) {
 					return nil
@@ -169,7 +169,7 @@ func pvcMarker(g osgraph.Graph, f osgraph.Namer, dcNode *appsgraph.DeploymentCon
 		isBlockedRolling := false
 		rollingParams := dc.Spec.Strategy.RollingParams
 		if rollingParams != nil {
-			maxSurge, _, _ := kdeplutil.ResolveFenceposts(&rollingParams.MaxSurge, &rollingParams.MaxUnavailable, dc.Spec.Replicas)
+			maxSurge, _, _ := kdeplutil.ResolveFenceposts(rollingParams.MaxSurge, rollingParams.MaxUnavailable, dc.Spec.Replicas)
 			isBlockedRolling = maxSurge > 0
 		}
 		// If the claim is not RWO or deployments will not have more than a pod running at any time
@@ -195,7 +195,7 @@ func pvcMarker(g osgraph.Graph, f osgraph.Namer, dcNode *appsgraph.DeploymentCon
 
 func hasRWOAccess(pvcNode *kubegraph.PersistentVolumeClaimNode) bool {
 	for _, accessMode := range pvcNode.PersistentVolumeClaim.Spec.AccessModes {
-		if accessMode == kapi.ReadWriteOnce {
+		if accessMode == corev1.ReadWriteOnce {
 			return true
 		}
 	}
diff --git a/pkg/oc/lib/graph/appsgraph/edge_test.go b/pkg/oc/lib/graph/appsgraph/edge_test.go
index c4e8ae2d70c1..a7c91062d6db 100644
--- a/pkg/oc/lib/graph/appsgraph/edge_test.go
+++ b/pkg/oc/lib/graph/appsgraph/edge_test.go
@@ -6,11 +6,12 @@ import (
 
 	"github.com/gonum/graph"
 
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsv1 "github.com/openshift/api/apps/v1"
+	appsutil "github.com/openshift/origin/pkg/apps/util"
 	nodes "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
@@ -24,16 +25,16 @@ func TestNamespaceEdgeMatching(t *testing.T) {
 	g := osgraph.New()
 
 	fn := func(namespace string, g osgraph.Interface) {
-		dc := &appsapi.DeploymentConfig{}
+		dc := &appsv1.DeploymentConfig{}
 		dc.Namespace = namespace
 		dc.Name = "the-dc"
 		dc.Spec.Selector = map[string]string{"a": "1"}
 		nodes.EnsureDeploymentConfigNode(g, dc)
 
-		rc := &kapi.ReplicationController{}
+		rc := &corev1.ReplicationController{}
 		rc.Namespace = namespace
 		rc.Name = "the-rc"
-		rc.Annotations = map[string]string{appsapi.DeploymentConfigAnnotation: "the-dc"}
+		rc.Annotations = map[string]string{appsutil.DeploymentConfigAnnotation: "the-dc"}
 		kubegraph.EnsureReplicationControllerNode(g, rc)
 	}
 
@@ -68,9 +69,9 @@ func namespaceFor(node graph.Node) (string, error) {
 			return "", err
 		}
 		return meta.GetNamespace(), nil
-	case *kapi.PodSpec:
+	case *corev1.PodSpec:
 		return node.(*kubegraph.PodSpecNode).Namespace, nil
-	case *kapi.ReplicationControllerSpec:
+	case *corev1.ReplicationControllerSpec:
 		return node.(*kubegraph.ReplicationControllerSpecNode).Namespace, nil
 	default:
 		return "", fmt.Errorf("unknown object: %#v", obj)
diff --git a/pkg/oc/lib/graph/appsgraph/edges.go b/pkg/oc/lib/graph/appsgraph/edges.go
index cc115a0d092f..95b31c377c5e 100644
--- a/pkg/oc/lib/graph/appsgraph/edges.go
+++ b/pkg/oc/lib/graph/appsgraph/edges.go
@@ -4,15 +4,13 @@ import (
 	"github.com/golang/glog"
 	"github.com/gonum/graph"
 
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
-	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
@@ -25,6 +23,8 @@ const (
 	DeploymentEdgeKind = "Deployment"
 	// VolumeClaimEdgeKind goes from DeploymentConfigs to PersistentVolumeClaims indicating a request for persistent storage.
 	VolumeClaimEdgeKind = "VolumeClaim"
+	// ManagedByControllerEdgeKind goes from Pod to controller when the Pod satisfies a controller's label selector
+	ManagedByControllerEdgeKind = "ManagedByController"
 )
 
 // AddTriggerDeploymentConfigsEdges creates edges that point to named Docker image repositories for each image used in the deployment.
@@ -34,10 +34,10 @@ func AddTriggerDeploymentConfigsEdges(g osgraph.MutableUniqueGraph, node *appsgr
 		return node
 	}
 
-	appsapi.EachTemplateImage(
+	EachTemplateImage(
 		&podTemplate.Spec,
-		appsapi.DeploymentConfigHasTrigger(node.DeploymentConfig),
-		func(image appsapi.TemplateImage, err error) {
+		DeploymentConfigHasTrigger(node.DeploymentConfig),
+		func(image TemplateImage, err error) {
 			if err != nil {
 				return
 			}
@@ -76,7 +76,7 @@ func AddDeploymentConfigsDeploymentEdges(g osgraph.MutableUniqueGraph, node *app
 			}
 			if BelongsToDeploymentConfig(node.DeploymentConfig, rcNode.ReplicationController) {
 				g.AddEdge(node, rcNode, DeploymentEdgeKind)
-				g.AddEdge(rcNode, node, kubeedges.ManagedByControllerEdgeKind)
+				g.AddEdge(rcNode, node, ManagedByControllerEdgeKind)
 			}
 		}
 	}
@@ -104,7 +104,7 @@ func AddVolumeClaimEdges(g osgraph.Graph, dcNode *appsgraph.DeploymentConfigNode
 			continue
 		}
 
-		syntheticClaim := &kapi.PersistentVolumeClaim{
+		syntheticClaim := &corev1.PersistentVolumeClaim{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      source.PersistentVolumeClaim.ClaimName,
 				Namespace: dcNode.DeploymentConfig.Namespace,
diff --git a/pkg/oc/lib/graph/appsgraph/helpers.go b/pkg/oc/lib/graph/appsgraph/helpers.go
index 50b416071ddd..671d4fe78aba 100644
--- a/pkg/oc/lib/graph/appsgraph/helpers.go
+++ b/pkg/oc/lib/graph/appsgraph/helpers.go
@@ -1,12 +1,14 @@
 package appsgraph
 
 import (
+	"fmt"
 	"sort"
 
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsv1 "github.com/openshift/api/apps/v1"
 	appsutil "github.com/openshift/origin/pkg/apps/util"
+	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
@@ -33,7 +35,7 @@ func RelevantDeployments(g osgraph.Graph, dcNode *appsgraph.DeploymentConfigNode
 	return nil, allDeployments
 }
 
-func BelongsToDeploymentConfig(config *appsapi.DeploymentConfig, b *kapi.ReplicationController) bool {
+func BelongsToDeploymentConfig(config *appsv1.DeploymentConfig, b *corev1.ReplicationController) bool {
 	if b.Annotations != nil {
 		return config.Name == appsutil.DeploymentConfigNameFor(b)
 	}
@@ -47,3 +49,102 @@ func (m RecentDeploymentReferences) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
 func (m RecentDeploymentReferences) Less(i, j int) bool {
 	return appsutil.DeploymentVersionFor(m[i].ReplicationController) > appsutil.DeploymentVersionFor(m[j].ReplicationController)
 }
+
+// TemplateImage is a structure for helping a caller iterate over a PodSpec
+type TemplateImage struct {
+	Image string
+
+	Ref *imageapi.DockerImageReference
+
+	From *corev1.ObjectReference
+
+	Container *corev1.Container
+}
+
+// templateImageForContainer takes a container and returns a TemplateImage.
+func templateImageForContainer(container *corev1.Container, triggerFn TriggeredByFunc) (TemplateImage, error) {
+	var ref imageapi.DockerImageReference
+	if trigger, ok := triggerFn(container); ok {
+		trigger.Image = container.Image
+		trigger.Container = container
+		return trigger, nil
+	}
+	ref, err := imageapi.ParseDockerImageReference(container.Image)
+	if err != nil {
+		return TemplateImage{Image: container.Image, Container: container}, err
+	}
+	return TemplateImage{Image: container.Image, Ref: &ref, Container: container}, nil
+}
+
+// TemplateImageForContainer locates the requested container in a pod spec, returning information about the
+// trigger (if it exists), or an error.
+func TemplateImageForContainer(pod *corev1.PodSpec, triggerFn TriggeredByFunc, containerName string) (TemplateImage, error) {
+	for i := range pod.Containers {
+		container := &pod.Containers[i]
+		if container.Name != containerName {
+			continue
+		}
+		return templateImageForContainer(container, triggerFn)
+	}
+	for i := range pod.InitContainers {
+		container := &pod.InitContainers[i]
+		if container.Name != containerName {
+			continue
+		}
+		return templateImageForContainer(container, triggerFn)
+	}
+	return TemplateImage{}, fmt.Errorf("no container %q found", containerName)
+}
+
+// eachTemplateImage invokes triggerFn and fn on the provided container.
+func eachTemplateImage(container *corev1.Container, triggerFn TriggeredByFunc, fn func(TemplateImage, error)) {
+	image, err := templateImageForContainer(container, triggerFn)
+	fn(image, err)
+}
+
+// EachTemplateImage iterates a pod spec, looking for triggers that match each container and invoking
+// fn with each located image.
+func EachTemplateImage(pod *corev1.PodSpec, triggerFn TriggeredByFunc, fn func(TemplateImage, error)) {
+	for i := range pod.Containers {
+		eachTemplateImage(&pod.Containers[i], triggerFn, fn)
+	}
+	for i := range pod.InitContainers {
+		eachTemplateImage(&pod.InitContainers[i], triggerFn, fn)
+	}
+}
+
+// TriggeredByFunc returns a TemplateImage or error from the provided container
+type TriggeredByFunc func(container *corev1.Container) (TemplateImage, bool)
+
+// IgnoreTriggers ignores the triggers
+func IgnoreTriggers(container *corev1.Container) (TemplateImage, bool) {
+	return TemplateImage{}, false
+}
+
+// DeploymentConfigHasTrigger returns a function that can identify the image for each container.
+func DeploymentConfigHasTrigger(config *appsv1.DeploymentConfig) TriggeredByFunc {
+	return func(container *corev1.Container) (TemplateImage, bool) {
+		for _, trigger := range config.Spec.Triggers {
+			params := trigger.ImageChangeParams
+			if params == nil {
+				continue
+			}
+			for _, name := range params.ContainerNames {
+				if container.Name == name {
+					if len(params.From.Name) == 0 {
+						continue
+					}
+					from := params.From
+					if len(from.Namespace) == 0 {
+						from.Namespace = config.Namespace
+					}
+					return TemplateImage{
+						Image: container.Image,
+						From:  &from,
+					}, true
+				}
+			}
+		}
+		return TemplateImage{}, false
+	}
+}
diff --git a/pkg/oc/lib/graph/appsgraph/nodes/nodes.go b/pkg/oc/lib/graph/appsgraph/nodes/nodes.go
index 17c55e25f827..f560abce2d78 100644
--- a/pkg/oc/lib/graph/appsgraph/nodes/nodes.go
+++ b/pkg/oc/lib/graph/appsgraph/nodes/nodes.go
@@ -3,13 +3,13 @@ package nodes
 import (
 	"github.com/gonum/graph"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsv1 "github.com/openshift/api/apps/v1"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
 // EnsureDeploymentConfigNode adds the provided deployment config to the graph if it does not exist
-func EnsureDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *appsapi.DeploymentConfig) *DeploymentConfigNode {
+func EnsureDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *appsv1.DeploymentConfig) *DeploymentConfigNode {
 	dcName := DeploymentConfigNodeName(dc)
 	dcNode := osgraph.EnsureUnique(
 		g,
@@ -27,7 +27,7 @@ func EnsureDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *appsapi.Deploy
 	return dcNode
 }
 
-func FindOrCreateSyntheticDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *appsapi.DeploymentConfig) *DeploymentConfigNode {
+func FindOrCreateSyntheticDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *appsv1.DeploymentConfig) *DeploymentConfigNode {
 	return osgraph.EnsureUnique(
 		g,
 		DeploymentConfigNodeName(dc),
diff --git a/pkg/oc/lib/graph/appsgraph/nodes/nodes_test.go b/pkg/oc/lib/graph/appsgraph/nodes/nodes_test.go
index ab6d9713455f..727725b8c55b 100644
--- a/pkg/oc/lib/graph/appsgraph/nodes/nodes_test.go
+++ b/pkg/oc/lib/graph/appsgraph/nodes/nodes_test.go
@@ -5,8 +5,8 @@ import (
 
 	"github.com/gonum/graph/topo"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
-	appstest "github.com/openshift/origin/pkg/apps/apis/apps/internaltest"
+	appsv1 "github.com/openshift/api/apps/v1"
+	appstest "github.com/openshift/origin/pkg/apps/util/test"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubetypes "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
@@ -14,7 +14,7 @@ import (
 func TestDCPodTemplateSpecNode(t *testing.T) {
 	g := osgraph.New()
 
-	dc := &appsapi.DeploymentConfig{}
+	dc := &appsv1.DeploymentConfig{}
 	dc.Namespace = "ns"
 	dc.Name = "foo"
 	dc.Spec.Template = appstest.OkPodTemplate()
diff --git a/pkg/oc/lib/graph/appsgraph/nodes/types.go b/pkg/oc/lib/graph/appsgraph/nodes/types.go
index a5f40595f776..3f8797171798 100644
--- a/pkg/oc/lib/graph/appsgraph/nodes/types.go
+++ b/pkg/oc/lib/graph/appsgraph/nodes/types.go
@@ -3,21 +3,21 @@ package nodes
 import (
 	"reflect"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsv1 "github.com/openshift/api/apps/v1"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
 
 var (
-	DeploymentConfigNodeKind = reflect.TypeOf(appsapi.DeploymentConfig{}).Name()
+	DeploymentConfigNodeKind = reflect.TypeOf(appsv1.DeploymentConfig{}).Name()
 )
 
-func DeploymentConfigNodeName(o *appsapi.DeploymentConfig) osgraph.UniqueName {
+func DeploymentConfigNodeName(o *appsv1.DeploymentConfig) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(DeploymentConfigNodeKind, o)
 }
 
 type DeploymentConfigNode struct {
 	osgraph.Node
-	DeploymentConfig *appsapi.DeploymentConfig
+	DeploymentConfig *appsv1.DeploymentConfig
 
 	IsFound bool
 }
diff --git a/pkg/oc/lib/graph/buildgraph/analysis/bc.go b/pkg/oc/lib/graph/buildgraph/analysis/bc.go
index ddb549984a5c..e36fa53a6edc 100644
--- a/pkg/oc/lib/graph/buildgraph/analysis/bc.go
+++ b/pkg/oc/lib/graph/buildgraph/analysis/bc.go
@@ -10,7 +10,8 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
+	buildv1 "github.com/openshift/api/build/v1"
+	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	buildedges "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph"
 	buildgraph "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/nodes"
@@ -97,13 +98,17 @@ func FindMissingInputImageStreams(g osgraph.Graph, f osgraph.Namer) []osgraph.Ma
 					// prior to our reaching this point in the code; so there is not need to check for that type vs. ImageStreamTag or ImageStreamImage;
 
 					tagNode, _ := bcInputNode.(*imagegraph.ImageStreamTagNode)
-					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
-					if _, ok := imageStream.Status.Tags[tagNode.ImageTag()]; !ok {
-
+					imageStream := imageStreamNode.Object().(*imagev1.ImageStream)
+					found := false
+					for _, tag := range imageStream.Status.Tags {
+						if tag.Tag == tagNode.ImageTag() {
+							found = true
+							break
+						}
+					}
+					if !found {
 						markers = append(markers, getImageStreamTagMarker(g, f, bcInputNode, imageStreamNode, tagNode, bcNode))
-
 					}
-
 				}
 
 			case *imagegraph.ImageStreamImageNode:
@@ -112,7 +117,7 @@ func FindMissingInputImageStreams(g osgraph.Graph, f osgraph.Namer) []osgraph.Ma
 					imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)
 
 					imageNode, _ := bcInputNode.(*imagegraph.ImageStreamImageNode)
-					imageStream := imageStreamNode.Object().(*imageapi.ImageStream)
+					imageStream := imageStreamNode.Object().(*imagev1.ImageStream)
 					found, imageID := validImageStreamImage(imageNode, imageStream)
 					if !found {
 
@@ -206,14 +211,14 @@ func findPendingTagMarkers(istNode *imagegraph.ImageStreamTagNode, g osgraph.Gra
 		// the latest build.
 		// TODO: Handle other build phases.
 		switch latestBuild.Build.Status.Phase {
-		case buildapi.BuildPhaseCancelled:
+		case buildv1.BuildPhaseCancelled:
 			// TODO: Add a warning here.
-		case buildapi.BuildPhaseError:
+		case buildv1.BuildPhaseError:
 			// TODO: Add a warning here.
-		case buildapi.BuildPhaseComplete:
+		case buildv1.BuildPhaseComplete:
 			// We should never hit this. The output of our build is missing but the build is complete.
 			// Most probably the user has messed up?
-		case buildapi.BuildPhaseFailed:
+		case buildv1.BuildPhaseFailed:
 			// Since the tag hasn't been populated yet, we assume there hasn't been a successful
 			// build so far.
 			markers = append(markers, osgraph.Marker{
@@ -295,7 +300,7 @@ func getImageStreamTagSuggestion(g osgraph.Graph, f osgraph.Namer, tagNode *imag
 }
 
 // getImageStreamImageMarker will return the appropriate marker for when a BuildConfig is missing its input ImageStreamImage
-func getImageStreamImageMarker(g osgraph.Graph, f osgraph.Namer, bcNode graph.Node, bcInputNode graph.Node, imageStreamNode graph.Node, imageNode *imagegraph.ImageStreamImageNode, imageStream *imageapi.ImageStream, imageID string) osgraph.Marker {
+func getImageStreamImageMarker(g osgraph.Graph, f osgraph.Namer, bcNode graph.Node, bcInputNode graph.Node, imageStreamNode graph.Node, imageNode *imagegraph.ImageStreamImageNode, imageStream *imagev1.ImageStream, imageID string) osgraph.Marker {
 	return osgraph.Marker{
 		Node: bcNode,
 		RelatedNodes: []graph.Node{bcInputNode,
@@ -308,7 +313,7 @@ func getImageStreamImageMarker(g osgraph.Graph, f osgraph.Namer, bcNode graph.No
 }
 
 // getImageStreamImageSuggestion will return the appropriate marker Suggestion for when a BuildConfig is missing its input ImageStreamImage
-func getImageStreamImageSuggestion(imageID string, imageStream *imageapi.ImageStream) osgraph.Suggestion {
+func getImageStreamImageSuggestion(imageID string, imageStream *imagev1.ImageStream) osgraph.Suggestion {
 	// check the images stream to see if any import images are in flight or have failed
 	annotation, ok := imageStream.Annotations[imageapi.DockerImageRepositoryCheckAnnotation]
 	if !ok {
@@ -335,7 +340,7 @@ func getImageStreamImageSuggestion(imageID string, imageStream *imageapi.ImageSt
 // validImageStreamImage will cycle through the imageStream.Status.Tags.[]TagEvent.DockerImageReference and  determine whether an image with the hexadecimal image id
 // associated with an ImageStreamImage reference in fact exists in a given ImageStream; on return, this method returns a true if does exist, and as well as the hexadecimal image
 // id from the ImageStreamImage
-func validImageStreamImage(imageNode *imagegraph.ImageStreamImageNode, imageStream *imageapi.ImageStream) (bool, string) {
+func validImageStreamImage(imageNode *imagegraph.ImageStreamImageNode, imageStream *imagev1.ImageStream) (bool, string) {
 	dockerImageReference, err := imageapi.ParseDockerImageReference(imageNode.Name)
 	if err == nil {
 		for _, tagEventList := range imageStream.Status.Tags {
diff --git a/pkg/oc/lib/graph/buildgraph/edges.go b/pkg/oc/lib/graph/buildgraph/edges.go
index 5cf01b686d34..bdd62f49eb26 100644
--- a/pkg/oc/lib/graph/buildgraph/edges.go
+++ b/pkg/oc/lib/graph/buildgraph/edges.go
@@ -2,10 +2,10 @@ package buildgraph
 
 import (
 	"github.com/gonum/graph"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
-	buildinternalhelpers "github.com/openshift/origin/pkg/build/apis/build/internal_helpers"
+	buildv1 "github.com/openshift/api/build/v1"
+	buildutil "github.com/openshift/origin/pkg/build/util"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	buildgraph "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
@@ -59,7 +59,7 @@ func AddAllBuildEdges(g osgraph.MutableUniqueGraph) {
 	}
 }
 
-func imageRefNode(g osgraph.MutableUniqueGraph, ref *kapi.ObjectReference, bc *buildapi.BuildConfig) graph.Node {
+func imageRefNode(g osgraph.MutableUniqueGraph, ref *corev1.ObjectReference, bc *buildv1.BuildConfig) graph.Node {
 	if ref == nil {
 		return nil
 	}
@@ -94,7 +94,7 @@ func AddInputEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNod
 	if in := buildgraph.EnsureSourceRepositoryNode(g, node.BuildConfig.Spec.Source); in != nil {
 		g.AddEdge(in, node, BuildInputEdgeKind)
 	}
-	inputImage := buildinternalhelpers.GetInputReference(node.BuildConfig.Spec.Strategy)
+	inputImage := buildutil.GetInputReference(node.BuildConfig.Spec.Strategy)
 	if input := imageRefNode(g, inputImage, node.BuildConfig); input != nil {
 		g.AddEdge(input, node, BuildInputImageEdgeKind)
 	}
@@ -103,12 +103,12 @@ func AddInputEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNod
 // AddTriggerEdges links the build config to its trigger input image nodes.
 func AddTriggerEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNode) {
 	for _, trigger := range node.BuildConfig.Spec.Triggers {
-		if trigger.Type != buildapi.ImageChangeBuildTriggerType {
+		if trigger.Type != buildv1.ImageChangeBuildTriggerType {
 			continue
 		}
 		from := trigger.ImageChange.From
 		if trigger.ImageChange.From == nil {
-			from = buildinternalhelpers.GetInputReference(node.BuildConfig.Spec.Strategy)
+			from = buildutil.GetInputReference(node.BuildConfig.Spec.Strategy)
 		}
 		triggerNode := imageRefNode(g, from, node.BuildConfig)
 		g.AddEdge(triggerNode, node, BuildTriggerImageEdgeKind)
diff --git a/pkg/oc/lib/graph/buildgraph/edges_test.go b/pkg/oc/lib/graph/buildgraph/edges_test.go
index 6a53d22e3287..4491edd5c525 100644
--- a/pkg/oc/lib/graph/buildgraph/edges_test.go
+++ b/pkg/oc/lib/graph/buildgraph/edges_test.go
@@ -9,6 +9,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 
+	buildv1 "github.com/openshift/api/build/v1"
 	buildapi "github.com/openshift/origin/pkg/build/apis/build"
 	nodes "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
@@ -22,12 +23,12 @@ func TestNamespaceEdgeMatching(t *testing.T) {
 	g := osgraph.New()
 
 	fn := func(namespace string, g osgraph.Interface) {
-		bc := &buildapi.BuildConfig{}
+		bc := &buildv1.BuildConfig{}
 		bc.Namespace = namespace
 		bc.Name = "the-bc"
 		nodes.EnsureBuildConfigNode(g, bc)
 
-		b := &buildapi.Build{}
+		b := &buildv1.Build{}
 		b.Namespace = namespace
 		b.Name = "the-build"
 		b.Labels = map[string]string{buildapi.BuildConfigLabel: "the-bc"}
diff --git a/pkg/oc/lib/graph/buildgraph/helpers.go b/pkg/oc/lib/graph/buildgraph/helpers.go
index 926ad4c6efa3..7f8927bfa3f8 100644
--- a/pkg/oc/lib/graph/buildgraph/helpers.go
+++ b/pkg/oc/lib/graph/buildgraph/helpers.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/gonum/graph"
 
+	buildv1 "github.com/openshift/api/build/v1"
 	buildapi "github.com/openshift/origin/pkg/build/apis/build"
 	buildgraph "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
@@ -35,11 +36,11 @@ func RelevantBuilds(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (*build
 
 	for i := range allBuilds {
 		switch allBuilds[i].Build.Status.Phase {
-		case buildapi.BuildPhaseComplete:
+		case buildv1.BuildPhaseComplete:
 			if lastSuccessfulBuild == nil {
 				lastSuccessfulBuild = allBuilds[i]
 			}
-		case buildapi.BuildPhaseFailed, buildapi.BuildPhaseCancelled, buildapi.BuildPhaseError:
+		case buildv1.BuildPhaseFailed, buildv1.BuildPhaseCancelled, buildv1.BuildPhaseError:
 			if lastUnsuccessfulBuild == nil {
 				lastUnsuccessfulBuild = allBuilds[i]
 			}
@@ -51,7 +52,7 @@ func RelevantBuilds(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (*build
 	return lastSuccessfulBuild, lastUnsuccessfulBuild, activeBuilds
 }
 
-func belongsToBuildConfig(config *buildapi.BuildConfig, b *buildapi.Build) bool {
+func belongsToBuildConfig(config *buildv1.BuildConfig, b *buildv1.Build) bool {
 	if b.Labels == nil {
 		return false
 	}
diff --git a/pkg/oc/lib/graph/buildgraph/nodes/nodes.go b/pkg/oc/lib/graph/buildgraph/nodes/nodes.go
index d666a0fe4827..98bb3611dc34 100644
--- a/pkg/oc/lib/graph/buildgraph/nodes/nodes.go
+++ b/pkg/oc/lib/graph/buildgraph/nodes/nodes.go
@@ -3,12 +3,12 @@ package nodes
 import (
 	"github.com/gonum/graph"
 
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
+	buildv1 "github.com/openshift/api/build/v1"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
 
 // EnsureBuildConfigNode adds a graph node for the specific build config if it does not exist
-func EnsureBuildConfigNode(g osgraph.MutableUniqueGraph, config *buildapi.BuildConfig) *BuildConfigNode {
+func EnsureBuildConfigNode(g osgraph.MutableUniqueGraph, config *buildv1.BuildConfig) *BuildConfigNode {
 	return osgraph.EnsureUnique(
 		g,
 		BuildConfigNodeName(config),
@@ -22,7 +22,7 @@ func EnsureBuildConfigNode(g osgraph.MutableUniqueGraph, config *buildapi.BuildC
 }
 
 // EnsureSourceRepositoryNode adds the specific BuildSource to the graph if it does not already exist.
-func EnsureSourceRepositoryNode(g osgraph.MutableUniqueGraph, source buildapi.BuildSource) *SourceRepositoryNode {
+func EnsureSourceRepositoryNode(g osgraph.MutableUniqueGraph, source buildv1.BuildSource) *SourceRepositoryNode {
 	switch {
 	case source.Git != nil:
 	default:
@@ -37,7 +37,7 @@ func EnsureSourceRepositoryNode(g osgraph.MutableUniqueGraph, source buildapi.Bu
 }
 
 // EnsureBuildNode adds a graph node for the build if it does not already exist.
-func EnsureBuildNode(g osgraph.MutableUniqueGraph, build *buildapi.Build) *BuildNode {
+func EnsureBuildNode(g osgraph.MutableUniqueGraph, build *buildv1.Build) *BuildNode {
 	return osgraph.EnsureUnique(g,
 		BuildNodeName(build),
 		func(node osgraph.Node) graph.Node {
diff --git a/pkg/oc/lib/graph/buildgraph/nodes/types.go b/pkg/oc/lib/graph/buildgraph/nodes/types.go
index 88d7e3103682..2797751d4ec8 100644
--- a/pkg/oc/lib/graph/buildgraph/nodes/types.go
+++ b/pkg/oc/lib/graph/buildgraph/nodes/types.go
@@ -4,25 +4,25 @@ import (
 	"fmt"
 	"reflect"
 
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
+	buildv1 "github.com/openshift/api/build/v1"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
 
 var (
-	BuildConfigNodeKind = reflect.TypeOf(buildapi.BuildConfig{}).Name()
-	BuildNodeKind       = reflect.TypeOf(buildapi.Build{}).Name()
+	BuildConfigNodeKind = reflect.TypeOf(buildv1.BuildConfig{}).Name()
+	BuildNodeKind       = reflect.TypeOf(buildv1.Build{}).Name()
 
 	// non-api types
-	SourceRepositoryNodeKind = reflect.TypeOf(buildapi.BuildSource{}).Name()
+	SourceRepositoryNodeKind = reflect.TypeOf(buildv1.BuildSource{}).Name()
 )
 
-func BuildConfigNodeName(o *buildapi.BuildConfig) osgraph.UniqueName {
+func BuildConfigNodeName(o *buildv1.BuildConfig) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(BuildConfigNodeKind, o)
 }
 
 type BuildConfigNode struct {
 	osgraph.Node
-	BuildConfig *buildapi.BuildConfig
+	BuildConfig *buildv1.BuildConfig
 }
 
 func (n BuildConfigNode) Object() interface{} {
@@ -41,7 +41,7 @@ func (*BuildConfigNode) Kind() string {
 	return BuildConfigNodeKind
 }
 
-func SourceRepositoryNodeName(source buildapi.BuildSource) osgraph.UniqueName {
+func SourceRepositoryNodeName(source buildv1.BuildSource) osgraph.UniqueName {
 	switch {
 	case source.Git != nil:
 		sourceType, uri, ref := "git", source.Git.URI, source.Git.Ref
@@ -53,7 +53,7 @@ func SourceRepositoryNodeName(source buildapi.BuildSource) osgraph.UniqueName {
 
 type SourceRepositoryNode struct {
 	osgraph.Node
-	Source buildapi.BuildSource
+	Source buildv1.BuildSource
 }
 
 func (n SourceRepositoryNode) String() string {
@@ -64,13 +64,13 @@ func (SourceRepositoryNode) Kind() string {
 	return SourceRepositoryNodeKind
 }
 
-func BuildNodeName(o *buildapi.Build) osgraph.UniqueName {
+func BuildNodeName(o *buildv1.Build) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(BuildNodeKind, o)
 }
 
 type BuildNode struct {
 	osgraph.Node
-	Build *buildapi.Build
+	Build *buildv1.Build
 }
 
 func (n BuildNode) Object() interface{} {
diff --git a/pkg/oc/lib/graph/genericgraph/graphview/daemonset.go b/pkg/oc/lib/graph/genericgraph/graphview/daemonset.go
index 48e9aaa1ce06..8161a8e88cab 100644
--- a/pkg/oc/lib/graph/genericgraph/graphview/daemonset.go
+++ b/pkg/oc/lib/graph/genericgraph/graphview/daemonset.go
@@ -1,17 +1,17 @@
 package graphview
 
 import (
-	appsedges "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
-	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
+	"github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
+	kubenodes "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
 type DaemonSet struct {
-	DaemonSet *kubegraph.DaemonSetNode
+	DaemonSet *kubenodes.DaemonSetNode
 
-	OwnedPods   []*kubegraph.PodNode
-	CreatedPods []*kubegraph.PodNode
+	OwnedPods   []*kubenodes.PodNode
+	CreatedPods []*kubenodes.PodNode
 
 	Images []ImagePipeline
 }
@@ -21,12 +21,12 @@ func AllDaemonSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]DaemonSet, IntSet)
 	covered := IntSet{}
 	views := []DaemonSet{}
 
-	for _, uncastNode := range g.NodesByKind(kubegraph.DaemonSetNodeKind) {
+	for _, uncastNode := range g.NodesByKind(kubenodes.DaemonSetNodeKind) {
 		if excludeNodeIDs.Has(uncastNode.ID()) {
 			continue
 		}
 
-		view, covers := NewDaemonSet(g, uncastNode.(*kubegraph.DaemonSetNode))
+		view, covers := NewDaemonSet(g, uncastNode.(*kubenodes.DaemonSetNode))
 		covered.Insert(covers.List()...)
 		views = append(views, view)
 	}
@@ -35,27 +35,27 @@ func AllDaemonSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]DaemonSet, IntSet)
 }
 
 // NewDaemonSet returns the DaemonSet and a set of all the NodeIDs covered by the DaemonSet
-func NewDaemonSet(g osgraph.Graph, node *kubegraph.DaemonSetNode) (DaemonSet, IntSet) {
+func NewDaemonSet(g osgraph.Graph, node *kubenodes.DaemonSetNode) (DaemonSet, IntSet) {
 	covered := IntSet{}
 	covered.Insert(node.ID())
 
 	view := DaemonSet{}
 	view.DaemonSet = node
 
-	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) {
-		podNode := uncastPodNode.(*kubegraph.PodNode)
+	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, appsgraph.ManagedByControllerEdgeKind) {
+		podNode := uncastPodNode.(*kubenodes.PodNode)
 		covered.Insert(podNode.ID())
 		view.OwnedPods = append(view.OwnedPods, podNode)
 	}
 
-	for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.TriggersDeploymentEdgeKind) {
+	for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubegraph.TriggersDeploymentEdgeKind) {
 		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation))
 		covered.Insert(covers.List()...)
 		view.Images = append(view.Images, imagePipeline)
 	}
 
 	// for image that we use, create an image pipeline and add it to the list
-	for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsedges.UsedInDeploymentEdgeKind) {
+	for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsgraph.UsedInDeploymentEdgeKind) {
 		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation))
 
 		covered.Insert(covers.List()...)
diff --git a/pkg/oc/lib/graph/genericgraph/graphview/image_pipeline.go b/pkg/oc/lib/graph/genericgraph/graphview/image_pipeline.go
index 6209f2fa6d8c..393d502d5d32 100644
--- a/pkg/oc/lib/graph/genericgraph/graphview/image_pipeline.go
+++ b/pkg/oc/lib/graph/genericgraph/graphview/image_pipeline.go
@@ -207,9 +207,11 @@ func imageStreamTagScheduled(g osgraph.Graph, input graph.Node, base ImageTagLoc
 	for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(input, imageedges.ReferencedImageStreamGraphEdgeKind) {
 		imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode)
 		if imageStreamNode.ImageStream != nil {
-			if tag, ok := imageStreamNode.ImageStream.Spec.Tags[base.ImageTag()]; ok {
-				scheduled = tag.ImportPolicy.Scheduled
-				return
+			for _, tag := range imageStreamNode.ImageStream.Spec.Tags {
+				if tag.Name == base.ImageTag() {
+					scheduled = tag.ImportPolicy.Scheduled
+					return
+				}
 			}
 		}
 	}
diff --git a/pkg/oc/lib/graph/genericgraph/graphview/petset.go b/pkg/oc/lib/graph/genericgraph/graphview/petset.go
index aa5dece2ec59..d9c08bb7cba5 100644
--- a/pkg/oc/lib/graph/genericgraph/graphview/petset.go
+++ b/pkg/oc/lib/graph/genericgraph/graphview/petset.go
@@ -1,17 +1,17 @@
 package graphview
 
 import (
-	appsedges "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
-	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
+	"github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
+	kubenodes "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
 type StatefulSet struct {
-	StatefulSet *kubegraph.StatefulSetNode
+	StatefulSet *kubenodes.StatefulSetNode
 
-	OwnedPods   []*kubegraph.PodNode
-	CreatedPods []*kubegraph.PodNode
+	OwnedPods   []*kubenodes.PodNode
+	CreatedPods []*kubenodes.PodNode
 
 	Images []ImagePipeline
 
@@ -23,12 +23,12 @@ func AllStatefulSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]StatefulSet, Int
 	covered := IntSet{}
 	views := []StatefulSet{}
 
-	for _, uncastNode := range g.NodesByKind(kubegraph.StatefulSetNodeKind) {
+	for _, uncastNode := range g.NodesByKind(kubenodes.StatefulSetNodeKind) {
 		if excludeNodeIDs.Has(uncastNode.ID()) {
 			continue
 		}
 
-		view, covers := NewStatefulSet(g, uncastNode.(*kubegraph.StatefulSetNode))
+		view, covers := NewStatefulSet(g, uncastNode.(*kubenodes.StatefulSetNode))
 		covered.Insert(covers.List()...)
 		views = append(views, view)
 	}
@@ -37,27 +37,27 @@ func AllStatefulSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]StatefulSet, Int
 }
 
 // NewStatefulSet returns the StatefulSet and a set of all the NodeIDs covered by the StatefulSet
-func NewStatefulSet(g osgraph.Graph, node *kubegraph.StatefulSetNode) (StatefulSet, IntSet) {
+func NewStatefulSet(g osgraph.Graph, node *kubenodes.StatefulSetNode) (StatefulSet, IntSet) {
 	covered := IntSet{}
 	covered.Insert(node.ID())
 
 	view := StatefulSet{}
 	view.StatefulSet = node
 
-	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) {
-		podNode := uncastPodNode.(*kubegraph.PodNode)
+	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, appsgraph.ManagedByControllerEdgeKind) {
+		podNode := uncastPodNode.(*kubenodes.PodNode)
 		covered.Insert(podNode.ID())
 		view.OwnedPods = append(view.OwnedPods, podNode)
 	}
 
-	for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.TriggersDeploymentEdgeKind) {
+	for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubegraph.TriggersDeploymentEdgeKind) {
 		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation))
 		covered.Insert(covers.List()...)
 		view.Images = append(view.Images, imagePipeline)
 	}
 
 	// for image that we use, create an image pipeline and add it to the list
-	for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsedges.UsedInDeploymentEdgeKind) {
+	for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsgraph.UsedInDeploymentEdgeKind) {
 		imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation))
 
 		covered.Insert(covers.List()...)
diff --git a/pkg/oc/lib/graph/genericgraph/graphview/rc.go b/pkg/oc/lib/graph/genericgraph/graphview/rc.go
index 6443d3c0631d..ec9906cebf6d 100644
--- a/pkg/oc/lib/graph/genericgraph/graphview/rc.go
+++ b/pkg/oc/lib/graph/genericgraph/graphview/rc.go
@@ -3,20 +3,20 @@ package graphview
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
 	"github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/analysis"
-	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
+	kubenodes "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
 type ReplicationController struct {
-	RC *kubegraph.ReplicationControllerNode
+	RC *kubenodes.ReplicationControllerNode
 
-	OwnedPods   []*kubegraph.PodNode
-	CreatedPods []*kubegraph.PodNode
+	OwnedPods   []*kubenodes.PodNode
+	CreatedPods []*kubenodes.PodNode
 
-	ConflictingRCs        []*kubegraph.ReplicationControllerNode
-	ConflictingRCIDToPods map[int][]*kubegraph.PodNode
+	ConflictingRCs        []*kubenodes.ReplicationControllerNode
+	ConflictingRCIDToPods map[int][]*kubenodes.PodNode
 }
 
 // AllReplicationControllers returns all the ReplicationControllers that aren't in the excludes set and the set of covered NodeIDs
@@ -24,12 +24,12 @@ func AllReplicationControllers(g osgraph.Graph, excludeNodeIDs IntSet) ([]Replic
 	covered := IntSet{}
 	rcViews := []ReplicationController{}
 
-	for _, uncastNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) {
+	for _, uncastNode := range g.NodesByKind(kubenodes.ReplicationControllerNodeKind) {
 		if excludeNodeIDs.Has(uncastNode.ID()) {
 			continue
 		}
 
-		rcView, covers := NewReplicationController(g, uncastNode.(*kubegraph.ReplicationControllerNode))
+		rcView, covers := NewReplicationController(g, uncastNode.(*kubenodes.ReplicationControllerNode))
 		covered.Insert(covers.List()...)
 		rcViews = append(rcViews, rcView)
 	}
@@ -52,33 +52,33 @@ func (rc *ReplicationController) MaxRecentContainerRestarts() int32 {
 }
 
 // NewReplicationController returns the ReplicationController and a set of all the NodeIDs covered by the ReplicationController
-func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationControllerNode) (ReplicationController, IntSet) {
+func NewReplicationController(g osgraph.Graph, rcNode *kubenodes.ReplicationControllerNode) (ReplicationController, IntSet) {
 	covered := IntSet{}
 	covered.Insert(rcNode.ID())
 
 	rcView := ReplicationController{}
 	rcView.RC = rcNode
-	rcView.ConflictingRCIDToPods = map[int][]*kubegraph.PodNode{}
+	rcView.ConflictingRCIDToPods = map[int][]*kubenodes.PodNode{}
 
-	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) {
-		podNode := uncastPodNode.(*kubegraph.PodNode)
+	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, appsgraph.ManagedByControllerEdgeKind) {
+		podNode := uncastPodNode.(*kubenodes.PodNode)
 		covered.Insert(podNode.ID())
 		rcView.OwnedPods = append(rcView.OwnedPods, podNode)
 
 		// check to see if this pod is managed by more than one RC
-		uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind)
+		uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, appsgraph.ManagedByControllerEdgeKind)
 		if len(uncastOwningRCs) > 1 {
 			for _, uncastOwningRC := range uncastOwningRCs {
 				if uncastOwningRC.ID() == rcNode.ID() {
 					continue
 				}
 
-				conflictingRC := uncastOwningRC.(*kubegraph.ReplicationControllerNode)
+				conflictingRC := uncastOwningRC.(*kubenodes.ReplicationControllerNode)
 				rcView.ConflictingRCs = append(rcView.ConflictingRCs, conflictingRC)
 
 				conflictingPods, ok := rcView.ConflictingRCIDToPods[conflictingRC.ID()]
 				if !ok {
-					conflictingPods = []*kubegraph.PodNode{}
+					conflictingPods = []*kubenodes.PodNode{}
 				}
 				conflictingPods = append(conflictingPods, podNode)
 				rcView.ConflictingRCIDToPods[conflictingRC.ID()] = conflictingPods
@@ -91,7 +91,7 @@ func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationCont
 
 // MaxRecentContainerRestartsForRC returns the maximum container restarts in pods
 // in the replication controller node for the last 10 minutes.
-func MaxRecentContainerRestartsForRC(g osgraph.Graph, rcNode *kubegraph.ReplicationControllerNode) int32 {
+func MaxRecentContainerRestartsForRC(g osgraph.Graph, rcNode *kubenodes.ReplicationControllerNode) int32 {
 	if rcNode == nil {
 		return 0
 	}
diff --git a/pkg/oc/lib/graph/genericgraph/graphview/rs.go b/pkg/oc/lib/graph/genericgraph/graphview/rs.go
index 220b45c2385a..42b573d5a4d7 100644
--- a/pkg/oc/lib/graph/genericgraph/graphview/rs.go
+++ b/pkg/oc/lib/graph/genericgraph/graphview/rs.go
@@ -1,30 +1,31 @@
 package graphview
 
 import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
 	"github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/analysis"
-	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubenodes "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
 type ReplicaSet struct {
-	RS *kubegraph.ReplicaSetNode
+	RS *kubenodes.ReplicaSetNode
 
-	OwnedPods   []*kubegraph.PodNode
-	CreatedPods []*kubegraph.PodNode
+	OwnedPods   []*kubenodes.PodNode
+	CreatedPods []*kubenodes.PodNode
 }
 
 func AllReplicaSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]ReplicaSet, IntSet) {
 	covered := IntSet{}
 	rsViews := []ReplicaSet{}
 
-	for _, uncastNode := range g.NodesByKind(kubegraph.ReplicaSetNodeKind) {
+	for _, uncastNode := range g.NodesByKind(kubenodes.ReplicaSetNodeKind) {
 		if excludeNodeIDs.Has(uncastNode.ID()) {
 			continue
 		}
 
-		rsView, covers := NewReplicaSet(g, uncastNode.(*kubegraph.ReplicaSetNode))
+		rsView, covers := NewReplicaSet(g, uncastNode.(*kubenodes.ReplicaSetNode))
 		covered.Insert(covers.List()...)
 		rsViews = append(rsViews, rsView)
 	}
@@ -46,15 +47,15 @@ func (rs *ReplicaSet) MaxRecentContainerRestarts() int32 {
 }
 
 // NewReplicationController returns the ReplicationController and a set of all the NodeIDs covered by the ReplicationController
-func NewReplicaSet(g osgraph.Graph, rsNode *kubegraph.ReplicaSetNode) (ReplicaSet, IntSet) {
+func NewReplicaSet(g osgraph.Graph, rsNode *kubenodes.ReplicaSetNode) (ReplicaSet, IntSet) {
 	covered := IntSet{}
 	covered.Insert(rsNode.ID())
 
 	rsView := ReplicaSet{}
 	rsView.RS = rsNode
 
-	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rsNode, kubeedges.ManagedByControllerEdgeKind) {
-		podNode := uncastPodNode.(*kubegraph.PodNode)
+	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rsNode, appsgraph.ManagedByControllerEdgeKind) {
+		podNode := uncastPodNode.(*kubenodes.PodNode)
 		covered.Insert(podNode.ID())
 		rsView.OwnedPods = append(rsView.OwnedPods, podNode)
 	}
@@ -62,7 +63,7 @@ func NewReplicaSet(g osgraph.Graph, rsNode *kubegraph.ReplicaSetNode) (ReplicaSe
 	return rsView, covered
 }
 
-func MaxRecentContainerRestartsForRS(g osgraph.Graph, rsNode *kubegraph.ReplicaSetNode) int32 {
+func MaxRecentContainerRestartsForRS(g osgraph.Graph, rsNode *kubenodes.ReplicaSetNode) int32 {
 	if rsNode == nil {
 		return 0
 	}
diff --git a/pkg/oc/lib/graph/genericgraph/graphview/veneering_test.go b/pkg/oc/lib/graph/genericgraph/graphview/veneering_test.go
index 7680349553b7..99ab5126ffee 100644
--- a/pkg/oc/lib/graph/genericgraph/graphview/veneering_test.go
+++ b/pkg/oc/lib/graph/genericgraph/graphview/veneering_test.go
@@ -7,10 +7,11 @@ import (
 	"github.com/gonum/graph"
 	"github.com/gonum/graph/simple"
 
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsv1 "github.com/openshift/api/apps/v1"
+	buildv1 "github.com/openshift/api/build/v1"
 	buildapi "github.com/openshift/origin/pkg/build/apis/build"
 	appsedges "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
@@ -187,35 +188,35 @@ func TestBareBCGroup(t *testing.T) {
 func TestGraph(t *testing.T) {
 	g := osgraph.New()
 	now := time.Now()
-	builds := []buildapi.Build{
+	builds := []buildv1.Build{
 		{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:              "build1-1-abc",
-				Labels:            map[string]string{buildapi.BuildConfigLabelDeprecated: "build1"},
+				Labels:            map[string]string{buildapi.BuildConfigLabel: "build1"},
 				CreationTimestamp: metav1.NewTime(now.Add(-10 * time.Second)),
 			},
-			Status: buildapi.BuildStatus{
-				Phase: buildapi.BuildPhaseFailed,
+			Status: buildv1.BuildStatus{
+				Phase: buildv1.BuildPhaseFailed,
 			},
 		},
 		{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:              "build1-2-abc",
-				Labels:            map[string]string{buildapi.BuildConfigLabelDeprecated: "build1"},
+				Labels:            map[string]string{buildapi.BuildConfigLabel: "build1"},
 				CreationTimestamp: metav1.NewTime(now.Add(-5 * time.Second)),
 			},
-			Status: buildapi.BuildStatus{
-				Phase: buildapi.BuildPhaseComplete,
+			Status: buildv1.BuildStatus{
+				Phase: buildv1.BuildPhaseComplete,
 			},
 		},
 		{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:              "build1-3-abc",
-				Labels:            map[string]string{buildapi.BuildConfigLabelDeprecated: "build1"},
+				Labels:            map[string]string{buildapi.BuildConfigLabel: "build1"},
 				CreationTimestamp: metav1.NewTime(now.Add(-15 * time.Second)),
 			},
-			Status: buildapi.BuildStatus{
-				Phase: buildapi.BuildPhasePending,
+			Status: buildv1.BuildStatus{
+				Phase: buildv1.BuildPhasePending,
 			},
 		},
 	}
@@ -223,89 +224,89 @@ func TestGraph(t *testing.T) {
 		buildgraph.EnsureBuildNode(g, &builds[i])
 	}
 
-	buildgraph.EnsureBuildConfigNode(g, &buildapi.BuildConfig{
+	buildgraph.EnsureBuildConfigNode(g, &buildv1.BuildConfig{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "build1"},
-		Spec: buildapi.BuildConfigSpec{
-			Triggers: []buildapi.BuildTriggerPolicy{
+		Spec: buildv1.BuildConfigSpec{
+			Triggers: []buildv1.BuildTriggerPolicy{
 				{
-					ImageChange: &buildapi.ImageChangeTrigger{},
+					ImageChange: &buildv1.ImageChangeTrigger{},
 				},
 			},
-			CommonSpec: buildapi.CommonSpec{
-				Strategy: buildapi.BuildStrategy{
-					SourceStrategy: &buildapi.SourceBuildStrategy{
-						From: kapi.ObjectReference{Kind: "ImageStreamTag", Name: "test:base-image"},
+			CommonSpec: buildv1.CommonSpec{
+				Strategy: buildv1.BuildStrategy{
+					SourceStrategy: &buildv1.SourceBuildStrategy{
+						From: corev1.ObjectReference{Kind: "ImageStreamTag", Name: "test:base-image"},
 					},
 				},
-				Output: buildapi.BuildOutput{
-					To: &kapi.ObjectReference{Kind: "ImageStreamTag", Name: "other:tag1"},
+				Output: buildv1.BuildOutput{
+					To: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "other:tag1"},
 				},
 			},
 		},
 	})
-	bcTestNode := buildgraph.EnsureBuildConfigNode(g, &buildapi.BuildConfig{
+	bcTestNode := buildgraph.EnsureBuildConfigNode(g, &buildv1.BuildConfig{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "test"},
-		Spec: buildapi.BuildConfigSpec{
-			CommonSpec: buildapi.CommonSpec{
-				Output: buildapi.BuildOutput{
-					To: &kapi.ObjectReference{Kind: "ImageStreamTag", Name: "other:base-image"},
+		Spec: buildv1.BuildConfigSpec{
+			CommonSpec: buildv1.CommonSpec{
+				Output: buildv1.BuildOutput{
+					To: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "other:base-image"},
 				},
 			},
 		},
 	})
-	buildgraph.EnsureBuildConfigNode(g, &buildapi.BuildConfig{
+	buildgraph.EnsureBuildConfigNode(g, &buildv1.BuildConfig{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "build2"},
-		Spec: buildapi.BuildConfigSpec{
-			CommonSpec: buildapi.CommonSpec{
-				Output: buildapi.BuildOutput{
-					To: &kapi.ObjectReference{Kind: "DockerImage", Name: "mycustom/repo/image:tag2"},
+		Spec: buildv1.BuildConfigSpec{
+			CommonSpec: buildv1.CommonSpec{
+				Output: buildv1.BuildOutput{
+					To: &corev1.ObjectReference{Kind: "DockerImage", Name: "mycustom/repo/image:tag2"},
 				},
 			},
 		},
 	})
-	kubegraph.EnsureServiceNode(g, &kapi.Service{
+	kubegraph.EnsureServiceNode(g, &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "svc-is-ignored"},
-		Spec: kapi.ServiceSpec{
+		Spec: corev1.ServiceSpec{
 			Selector: nil,
 		},
 	})
-	kubegraph.EnsureServiceNode(g, &kapi.Service{
+	kubegraph.EnsureServiceNode(g, &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "svc1"},
-		Spec: kapi.ServiceSpec{
+		Spec: corev1.ServiceSpec{
 			Selector: map[string]string{
 				"deploymentconfig": "deploy1",
 			},
 		},
 	})
-	kubegraph.EnsureServiceNode(g, &kapi.Service{
+	kubegraph.EnsureServiceNode(g, &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "svc2"},
-		Spec: kapi.ServiceSpec{
+		Spec: corev1.ServiceSpec{
 			Selector: map[string]string{
 				"deploymentconfig": "deploy1",
 				"env":              "prod",
 			},
 		},
 	})
-	appsgraph.EnsureDeploymentConfigNode(g, &appsapi.DeploymentConfig{
+	appsgraph.EnsureDeploymentConfigNode(g, &appsv1.DeploymentConfig{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "other", Name: "deploy1"},
-		Spec: appsapi.DeploymentConfigSpec{
-			Triggers: []appsapi.DeploymentTriggerPolicy{
+		Spec: appsv1.DeploymentConfigSpec{
+			Triggers: []appsv1.DeploymentTriggerPolicy{
 				{
-					ImageChangeParams: &appsapi.DeploymentTriggerImageChangeParams{
-						From:           kapi.ObjectReference{Kind: "ImageStreamTag", Namespace: "default", Name: "other:tag1"},
+					ImageChangeParams: &appsv1.DeploymentTriggerImageChangeParams{
+						From:           corev1.ObjectReference{Kind: "ImageStreamTag", Namespace: "default", Name: "other:tag1"},
 						ContainerNames: []string{"1", "2"},
 					},
 				},
 			},
-			Template: &kapi.PodTemplateSpec{
+			Template: &corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
 						"deploymentconfig": "deploy1",
 						"env":              "prod",
 					},
 				},
-				Spec: kapi.PodSpec{
-					Containers: []kapi.Container{
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
 						{
 							Name:  "1",
 							Image: "mycustom/repo/image",
@@ -323,18 +324,18 @@ func TestGraph(t *testing.T) {
 			},
 		},
 	})
-	appsgraph.EnsureDeploymentConfigNode(g, &appsapi.DeploymentConfig{
+	appsgraph.EnsureDeploymentConfigNode(g, &appsv1.DeploymentConfig{
 		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "deploy2"},
-		Spec: appsapi.DeploymentConfigSpec{
-			Template: &kapi.PodTemplateSpec{
+		Spec: appsv1.DeploymentConfigSpec{
+			Template: &corev1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
 						"deploymentconfig": "deploy2",
 						"env":              "dev",
 					},
 				},
-				Spec: kapi.PodSpec{
-					Containers: []kapi.Container{
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
 						{
 							Name:  "1",
 							Image: "someother/image:v1",
diff --git a/pkg/oc/lib/graph/genericgraph/test/bad_secret_refs.yaml b/pkg/oc/lib/graph/genericgraph/test/bad_secret_refs.yaml
index 40853587d9df..4c7cef523a72 100644
--- a/pkg/oc/lib/graph/genericgraph/test/bad_secret_refs.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/bad_secret_refs.yaml
@@ -1,6 +1,6 @@
 apiVersion: v1
 items:
-- apiVersion: v1
+- apiVersion: image.openshift.io/v1
   kind: ImageStream
   metadata:
     creationTimestamp: null
@@ -9,7 +9,7 @@ items:
     dockerImageRepository: library/ubuntu-debootstrap:14.04
   status:
     dockerImageRepository: ""
-- apiVersion: v1
+- apiVersion: image.openshift.io/v1
   kind: ImageStream
   metadata:
     creationTimestamp: null
@@ -22,7 +22,7 @@ items:
       name: latest
   status:
     dockerImageRepository: ""
-- apiVersion: v1
+- apiVersion: build.openshift.io/v1
   kind: BuildConfig
   metadata:
     creationTimestamp: null
@@ -59,7 +59,7 @@ items:
   metadata:
     creationTimestamp: null
     name: unmountable-secret
-- apiVersion: v1
+- apiVersion: apps.openshift.io/v1
   kind: DeploymentConfig
   metadata:
     creationTimestamp: null
diff --git a/pkg/oc/lib/graph/genericgraph/test/dc-with-claim.yaml b/pkg/oc/lib/graph/genericgraph/test/dc-with-claim.yaml
index ec5086b72d75..2033c610b1a4 100644
--- a/pkg/oc/lib/graph/genericgraph/test/dc-with-claim.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/dc-with-claim.yaml
@@ -11,7 +11,7 @@ items:
    resources:
     requests:
       storage: 2Gi
-- apiVersion: v1
+- apiVersion: apps.openshift.io/v1
   kind: DeploymentConfig
   metadata:
     name: broken
diff --git a/pkg/oc/lib/graph/genericgraph/test/deployment.yaml b/pkg/oc/lib/graph/genericgraph/test/deployment.yaml
index 4a2ecb84ec22..7adbf440e48c 100644
--- a/pkg/oc/lib/graph/genericgraph/test/deployment.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/deployment.yaml
@@ -18,7 +18,7 @@ items:
     type: ClusterIP
   status:
     loadBalancer: {}
-- apiVersion: extensions/v1beta1
+- apiVersion: apps/v1
   kind: Deployment
   metadata:
     annotations:
diff --git a/pkg/oc/lib/graph/genericgraph/test/hpa-missing-cpu-target.yaml b/pkg/oc/lib/graph/genericgraph/test/hpa-missing-cpu-target.yaml
index 4b5205dfda21..59e195e7ae0b 100644
--- a/pkg/oc/lib/graph/genericgraph/test/hpa-missing-cpu-target.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/hpa-missing-cpu-target.yaml
@@ -1,17 +1,17 @@
-apiVersion: autoscaling/v2beta1
+apiVersion: autoscaling/v1
 kind: HorizontalPodAutoscaler
 metadata:
-  name: test-autoscaler 
+  name: test-autoscaler
 spec:
   scaleTargetRef:
-    kind: DeploymentConfig 
-    name: frontend 
+    kind: DeploymentConfig
+    name: frontend
     apiVersion: v1
-  minReplicas: 1 
+  minReplicas: 1
   maxReplicas: 10
   metrics:
     # a memory resource stops the default of 80% for cpu to apply
     - type: Resource
       resource:
         name: memory
-        targetAverageUtilization: 42
\ No newline at end of file
+        targetAverageUtilization: 42
diff --git a/pkg/oc/lib/graph/genericgraph/test/hpa-with-scale-ref.yaml b/pkg/oc/lib/graph/genericgraph/test/hpa-with-scale-ref.yaml
index 0d8758dee32c..ec954d5f8434 100644
--- a/pkg/oc/lib/graph/genericgraph/test/hpa-with-scale-ref.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/hpa-with-scale-ref.yaml
@@ -1,6 +1,6 @@
 apiVersion: v1
 items:
-- apiVersion: extensions/v1beta1
+- apiVersion: apps/v1
   kind: Deployment
   metadata:
     annotations:
@@ -70,7 +70,7 @@ items:
     maxReplicas: 1
     minReplicas: 1
     scaleTargetRef:
-      apiVersion: extensions/v1beta1
+      apiVersion: apps/v1
       kind: Deployment
       name: ruby-deploy
     targetCPUUtilizationPercentage: 80
diff --git a/pkg/oc/lib/graph/genericgraph/test/overlapping-hpas.yaml b/pkg/oc/lib/graph/genericgraph/test/overlapping-hpas.yaml
index dfc7e0e69256..65f10a50380c 100644
--- a/pkg/oc/lib/graph/genericgraph/test/overlapping-hpas.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/overlapping-hpas.yaml
@@ -4,41 +4,41 @@ items:
 - apiVersion: autoscaling/v1
   kind: HorizontalPodAutoscaler
   metadata:
-    name: overlapping 
+    name: overlapping
   spec:
     scaleTargetRef:
-      kind: DeploymentConfig 
-      name: frontend 
+      kind: DeploymentConfig
+      name: frontend
       apiVersion: apps.openshift.io/v1
       subresource: scale
-    minReplicas: 1 
-    maxReplicas: 10 
+    minReplicas: 1
+    maxReplicas: 10
     targetCPUUtilizationPercentage: 80
 - apiVersion: autoscaling/v1
   kind: HorizontalPodAutoscaler
   metadata:
-    name: overlapping2 
+    name: overlapping2
   spec:
     scaleTargetRef:
-      kind: DeploymentConfig 
-      name: frontend 
+      kind: DeploymentConfig
+      name: frontend
       apiVersion: apps.openshift.io/v1
       subresource: scale
-    minReplicas: 1 
-    maxReplicas: 10 
+    minReplicas: 1
+    maxReplicas: 10
     targetCPUUtilizationPercentage: 80
 - apiVersion: autoscaling/v1
   kind: HorizontalPodAutoscaler
   metadata:
-    name: overlapping3 
+    name: overlapping3
   spec:
     scaleTargetRef:
-      kind: DeploymentConfig 
-      name: frontend 
+      kind: DeploymentConfig
+      name: frontend
       apiVersion: apps.openshift.io/v1
       subresource: scale
-    minReplicas: 1 
-    maxReplicas: 10 
+    minReplicas: 1
+    maxReplicas: 10
     targetCPUUtilizationPercentage: 80
 - apiVersion: autoscaling/v1
   kind: HorizontalPodAutoscaler
@@ -46,12 +46,12 @@ items:
     name: dc-autoscaler
   spec:
     scaleTargetRef:
-      kind: DeploymentConfig 
-      name: someotherdc 
+      kind: DeploymentConfig
+      name: someotherdc
       apiVersion: apps.openshift.io/v1
       subresource: scale
-    minReplicas: 1 
-    maxReplicas: 10 
+    minReplicas: 1
+    maxReplicas: 10
     targetCPUUtilizationPercentage: 80
 - apiVersion: autoscaling/v1
   kind: HorizontalPodAutoscaler
@@ -59,14 +59,14 @@ items:
     name: rc-autoscaler
   spec:
     scaleTargetRef:
-      kind: ReplicationController 
-      name: someotherdc-1 
-      apiVersion: v1 
+      kind: ReplicationController
+      name: someotherdc-1
+      apiVersion: v1
       subresource: scale
-    minReplicas: 1 
+    minReplicas: 1
     maxReplicas: 10
     targetCPUUtilizationPercentage: 80
-- apiVersion: v1
+- apiVersion: apps.openshift.io/v1
   kind: DeploymentConfig
   metadata:
     creationTimestamp: null
diff --git a/pkg/oc/lib/graph/genericgraph/test/prereq-image-present-notag.yaml b/pkg/oc/lib/graph/genericgraph/test/prereq-image-present-notag.yaml
index c2b656a2747a..06cea7a1f1bc 100644
--- a/pkg/oc/lib/graph/genericgraph/test/prereq-image-present-notag.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/prereq-image-present-notag.yaml
@@ -1,6 +1,6 @@
 apiVersion: v1
 items:
-- apiVersion: v1
+- apiVersion: build.openshift.io/v1
   kind: BuildConfig
   metadata:
     creationTimestamp: null
@@ -37,7 +37,7 @@ items:
       type: ImageChange
   status:
     lastVersion: 0
-- apiVersion: v1
+- apiVersion: build.openshift.io/v1
   kind: BuildConfig
   metadata:
     creationTimestamp: null
@@ -55,7 +55,7 @@ items:
     strategy:
       dockerStrategy:
         from:
-          kind: DockerReference
+          kind: DockerImage
           name: ruby-20-centos7:latest
           namespace: openshift
       type: Docker
@@ -70,7 +70,7 @@ items:
       type: ImageChange
   status:
     lastVersion: 0
-- apiVersion: v1
+- apiVersion: build.openshift.io/v1
   kind: BuildConfig
   metadata:
     creationTimestamp: null
@@ -105,7 +105,7 @@ items:
       type: ImageChange
   status:
     lastVersion: 0
-- apiVersion: v1
+- apiVersion: build.openshift.io/v1
   kind: BuildConfig
   metadata:
     creationTimestamp: null
@@ -139,7 +139,7 @@ items:
       type: ImageChange
   status:
     lastVersion: 0
-- apiVersion: v1
+- apiVersion: build.openshift.io/v1
   kind: BuildConfig
   metadata:
     creationTimestamp: null
@@ -152,7 +152,7 @@ items:
         kind: ImageStreamTag
         name: test2:latest
     resources: {}
-    source: 
+    source:
       git:
         uri: https://github.com/openshift/origin
       type: Git
@@ -174,7 +174,7 @@ items:
       type: ImageChange
   status:
     lastVersion: 0
-- apiVersion: v1
+- apiVersion: build.openshift.io/v1
   kind: BuildConfig
   metadata:
     creationTimestamp: null
@@ -209,7 +209,7 @@ items:
       type: ImageChange
   status:
     lastVersion: 0
-- apiVersion: v1
+- apiVersion: image.openshift.io/v1
   kind: ImageStream
   metadata:
     annotations:
diff --git a/pkg/oc/lib/graph/genericgraph/test/rollingupdate-daemonset.yaml b/pkg/oc/lib/graph/genericgraph/test/rollingupdate-daemonset.yaml
index 56e41a21fc64..137255ecf630 100644
--- a/pkg/oc/lib/graph/genericgraph/test/rollingupdate-daemonset.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/rollingupdate-daemonset.yaml
@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   name: bind
diff --git a/pkg/oc/lib/graph/genericgraph/test/runtimeobject_nodebuilder.go b/pkg/oc/lib/graph/genericgraph/test/runtimeobject_nodebuilder.go
index 191510478832..b6293a75eac2 100644
--- a/pkg/oc/lib/graph/genericgraph/test/runtimeobject_nodebuilder.go
+++ b/pkg/oc/lib/graph/genericgraph/test/runtimeobject_nodebuilder.go
@@ -2,75 +2,72 @@ package test
 
 import (
 	"fmt"
-	"path/filepath"
+	"io/ioutil"
 	"reflect"
 
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/rest/fake"
-	"k8s.io/client-go/restmapper"
-	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource"
-
-	_ "github.com/openshift/origin/pkg/api/install"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
-	buildapi "github.com/openshift/origin/pkg/build/apis/build"
-	imageapi "github.com/openshift/origin/pkg/image/apis/image"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	kubernetesscheme "k8s.io/client-go/kubernetes/scheme"
+
+	"github.com/openshift/api"
+	appsv1 "github.com/openshift/api/apps/v1"
+	buildv1 "github.com/openshift/api/build/v1"
+	imagev1 "github.com/openshift/api/image/v1"
+	routev1 "github.com/openshift/api/route/v1"
+	"github.com/openshift/origin/pkg/api/legacy"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	buildgraph "github.com/openshift/origin/pkg/oc/lib/graph/buildgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 	routegraph "github.com/openshift/origin/pkg/oc/lib/graph/routegraph/nodes"
-	"github.com/openshift/origin/pkg/oc/util/ocscheme"
-	routeapi "github.com/openshift/origin/pkg/route/apis/route"
 )
 
 // typeToEnsureMethod stores types to Ensure*Node methods
 var typeToEnsureMethod = map[reflect.Type]reflect.Value{}
 
 func init() {
-	if err := RegisterEnsureNode(&imageapi.Image{}, imagegraph.EnsureImageNode); err != nil {
+	if err := RegisterEnsureNode(&imagev1.Image{}, imagegraph.EnsureImageNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&imageapi.ImageStream{}, imagegraph.EnsureImageStreamNode); err != nil {
+	if err := RegisterEnsureNode(&imagev1.ImageStream{}, imagegraph.EnsureImageStreamNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&appsapi.DeploymentConfig{}, appsgraph.EnsureDeploymentConfigNode); err != nil {
+	if err := RegisterEnsureNode(&appsv1.DeploymentConfig{}, appsgraph.EnsureDeploymentConfigNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&buildapi.BuildConfig{}, buildgraph.EnsureBuildConfigNode); err != nil {
+	if err := RegisterEnsureNode(&buildv1.BuildConfig{}, buildgraph.EnsureBuildConfigNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&buildapi.Build{}, buildgraph.EnsureBuildNode); err != nil {
+	if err := RegisterEnsureNode(&buildv1.Build{}, buildgraph.EnsureBuildNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&routeapi.Route{}, routegraph.EnsureRouteNode); err != nil {
+	if err := RegisterEnsureNode(&routev1.Route{}, routegraph.EnsureRouteNode); err != nil {
 		panic(err)
 	}
 
-	if err := RegisterEnsureNode(&kapi.Pod{}, kubegraph.EnsurePodNode); err != nil {
+	if err := RegisterEnsureNode(&corev1.Pod{}, kubegraph.EnsurePodNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&kapi.Service{}, kubegraph.EnsureServiceNode); err != nil {
+	if err := RegisterEnsureNode(&corev1.Service{}, kubegraph.EnsureServiceNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&kapi.ServiceAccount{}, kubegraph.EnsureServiceAccountNode); err != nil {
+	if err := RegisterEnsureNode(&corev1.ServiceAccount{}, kubegraph.EnsureServiceAccountNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&kapi.Secret{}, kubegraph.EnsureSecretNode); err != nil {
+	if err := RegisterEnsureNode(&corev1.Secret{}, kubegraph.EnsureSecretNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&kapi.ReplicationController{}, kubegraph.EnsureReplicationControllerNode); err != nil {
+	if err := RegisterEnsureNode(&corev1.ReplicationController{}, kubegraph.EnsureReplicationControllerNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&kapi.PersistentVolumeClaim{}, kubegraph.EnsurePersistentVolumeClaimNode); err != nil {
+	if err := RegisterEnsureNode(&corev1.PersistentVolumeClaim{}, kubegraph.EnsurePersistentVolumeClaimNode); err != nil {
 		panic(err)
 	}
-	if err := RegisterEnsureNode(&autoscaling.HorizontalPodAutoscaler{}, kubegraph.EnsureHorizontalPodAutoscalerNode); err != nil {
+	if err := RegisterEnsureNode(&autoscalingv1.HorizontalPodAutoscaler{}, kubegraph.EnsureHorizontalPodAutoscalerNode); err != nil {
 		panic(err)
 	}
 }
 
@@ -96,7 +93,7 @@ func EnsureNode(g osgraph.Graph, obj interface{}) error {
 
 	ensureMethod, exists := typeToEnsureMethod[reflectedContainedType]
 	if !exists {
-		return fmt.Errorf("%v is not registered", reflectedContainedType)
+		return fmt.Errorf("%v is not registered: %#v", reflectedContainedType, obj)
 	}
 
 	callEnsureNode(g, reflect.ValueOf(obj), ensureMethod)
@@ -129,40 +126,36 @@ func BuildGraph(path string) (osgraph.Graph, []runtime.Object, error) {
 	g := osgraph.New()
 	objs := []runtime.Object{}
 
-	abspath, err := filepath.Abs(path)
+	data, err := ioutil.ReadFile(path)
 	if err != nil {
 		return g, objs, err
 	}
-
-	builder := resource.NewFakeBuilder(
-		func(version schema.GroupVersion) (resource.RESTClient, error) {
-			return &fake.RESTClient{}, nil
-		},
-		func() (meta.RESTMapper, error) {
-			return testrestmapper.TestOnlyStaticRESTMapper(ocscheme.ReadingInternalScheme), nil
-		},
-		func() (restmapper.CategoryExpander, error) {
-			return resource.FakeCategoryExpander, nil
-		})
-
-	r := builder.
-		WithScheme(ocscheme.ReadingInternalScheme).
-		FilenameParam(false, &resource.FilenameOptions{Recursive: false, Filenames: []string{abspath}}).
-		Flatten().
-		Do()
-
-	if r.Err() != nil {
-		return g, objs, r.Err()
-	}
-
-	infos, err := r.Infos()
+	scheme := runtime.NewScheme()
+	kubernetesscheme.AddToScheme(scheme)
+	api.Install(scheme)
+	legacy.InstallExternalLegacyAll(scheme)
+	codecs := serializer.NewCodecFactory(scheme)
+	decoder := codecs.UniversalDeserializer()
+	obj, err := runtime.Decode(decoder, data)
 	if err != nil {
 		return g, objs, err
 	}
-	for _, info := range infos {
-		objs = append(objs, info.Object)
+	if !meta.IsListType(obj) {
+		objs = []runtime.Object{obj}
+	} else {
+		list, err := meta.ExtractList(obj)
+		if err != nil {
+			return g, objs, err
+		}
+		errs := runtime.DecodeList(list, decoder)
+		if len(errs) > 0 {
+			return g, objs, errs[0]
+		}
+		objs = list
+	}
 
-		if err := EnsureNode(g, info.Object); err != nil {
+	for _, obj := range objs {
+		if err := EnsureNode(g, obj); err != nil {
 			return g, objs, err
 		}
 	}
diff --git a/pkg/oc/lib/graph/genericgraph/test/statefulset.yaml b/pkg/oc/lib/graph/genericgraph/test/statefulset.yaml
index 62aec44af7fe..8096ffe6292a 100644
--- a/pkg/oc/lib/graph/genericgraph/test/statefulset.yaml
+++ b/pkg/oc/lib/graph/genericgraph/test/statefulset.yaml
@@ -1,6 +1,6 @@
 apiVersion: v1
 items:
-- apiVersion: apps/v1beta1
+- apiVersion: apps/v1
   kind: StatefulSet
   metadata:
     creationTimestamp: 2016-07-21T15:53:09Z
diff --git a/pkg/oc/lib/graph/imagegraph/edges.go b/pkg/oc/lib/graph/imagegraph/edges.go
index f92c4a191a5e..9d6cca96f01d 100644
--- a/pkg/oc/lib/graph/imagegraph/edges.go
+++ b/pkg/oc/lib/graph/imagegraph/edges.go
@@ -3,6 +3,7 @@ package imagegraph
 import (
 	"github.com/gonum/graph"
 
+	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
@@ -18,7 +19,7 @@ const (
 // AddImageStreamTagRefEdge ensures that a directed edge exists between an IST Node and the IS it references
 func AddImageStreamTagRefEdge(g osgraph.MutableUniqueGraph, node *imagegraph.ImageStreamTagNode) {
 	isName, _, _ := imageapi.SplitImageStreamTag(node.Name)
-	imageStream := &imageapi.ImageStream{}
+	imageStream := &imagev1.ImageStream{}
 	imageStream.Namespace = node.Namespace
 	imageStream.Name = isName
 
@@ -29,7 +30,7 @@ func AddImageStreamTagRefEdge(g osgraph.MutableUniqueGraph, node *imagegraph.Ima
 // AddImageStreamImageRefEdge ensures that a directed edge exists between an ImageStreamImage Node and the IS it references
 func AddImageStreamImageRefEdge(g osgraph.MutableUniqueGraph, node *imagegraph.ImageStreamImageNode) {
 	dockImgRef, _ := imageapi.ParseDockerImageReference(node.Name)
-	imageStream := &imageapi.ImageStream{}
+	imageStream := &imagev1.ImageStream{}
 	imageStream.Namespace = node.Namespace
 	imageStream.Name = dockImgRef.Name
 
diff --git a/pkg/oc/lib/graph/imagegraph/nodes/nodes.go b/pkg/oc/lib/graph/imagegraph/nodes/nodes.go
index 243349212b0c..fbdfdcc12c41 100644
--- a/pkg/oc/lib/graph/imagegraph/nodes/nodes.go
+++ b/pkg/oc/lib/graph/imagegraph/nodes/nodes.go
@@ -5,11 +5,12 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
 
-func EnsureImageNode(g osgraph.MutableUniqueGraph, img *imageapi.Image) graph.Node {
+func EnsureImageNode(g osgraph.MutableUniqueGraph, img *imagev1.Image) graph.Node {
 	return osgraph.EnsureUnique(g,
 		ImageNodeName(img),
 		func(node osgraph.Node) graph.Node {
@@ -21,13 +22,13 @@ func EnsureImageNode(g osgraph.MutableUniqueGraph, img *imageapi.Image) graph.No
 // EnsureAllImageStreamTagNodes creates all the ImageStreamTagNodes that are guaranteed to be present based on the ImageStream.
 // This is different than inferring the presence of an object, since the IST is an object derived from a join between the ImageStream
 // and the Image it references.
-func EnsureAllImageStreamTagNodes(g osgraph.MutableUniqueGraph, is *imageapi.ImageStream) []*ImageStreamTagNode {
+func EnsureAllImageStreamTagNodes(g osgraph.MutableUniqueGraph, is *imagev1.ImageStream) []*ImageStreamTagNode {
 	ret := []*ImageStreamTagNode{}
 
-	for tag := range is.Status.Tags {
-		ist := &imageapi.ImageStreamTag{}
+	for _, tag := range is.Status.Tags {
+		ist := &imagev1.ImageStreamTag{}
 		ist.Namespace = is.Namespace
-		ist.Name = imageapi.JoinImageStreamTag(is.Name, tag)
+		ist.Name = imageapi.JoinImageStreamTag(is.Name, tag.Tag)
 
 		istNode := EnsureImageStreamTagNode(g, ist)
 		ret = append(ret, istNode)
@@ -37,7 +38,7 @@ func EnsureAllImageStreamTagNodes(g osgraph.MutableUniqueGraph, is *imageapi.Ima
 }
 
 func FindImage(g osgraph.MutableUniqueGraph, imageName string) *ImageNode {
-	n := g.Find(ImageNodeName(&imageapi.Image{ObjectMeta: metav1.ObjectMeta{Name: imageName}}))
+	n := g.Find(ImageNodeName(&imagev1.Image{ObjectMeta: metav1.ObjectMeta{Name: imageName}}))
 	if imageNode, ok := n.(*ImageNode); ok {
 		return imageNode
 	}
@@ -67,8 +68,8 @@ func EnsureDockerRepositoryNode(g osgraph.MutableUniqueGraph, name, tag string)
 
 // MakeImageStreamTagObjectMeta returns an ImageStreamTag that has enough information to join the graph, but it is not
 // based on a full IST object.  This can be used to properly initialize the graph without having to retrieve all ISTs
-func MakeImageStreamTagObjectMeta(namespace, name, tag string) *imageapi.ImageStreamTag {
-	return &imageapi.ImageStreamTag{
+func MakeImageStreamTagObjectMeta(namespace, name, tag string) *imagev1.ImageStreamTag {
+	return &imagev1.ImageStreamTag{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      imageapi.JoinImageStreamTag(name, tag),
@@ -78,8 +79,8 @@ func MakeImageStreamTagObjectMeta(namespace, name, tag string) *imageapi.ImageSt
 
 // MakeImageStreamTagObjectMeta2 returns an ImageStreamTag that has enough information to join the graph, but it is not
 // based on a full IST object.  This can be used to properly initialize the graph without having to retrieve all ISTs
-func MakeImageStreamTagObjectMeta2(namespace, name string) *imageapi.ImageStreamTag {
-	return &imageapi.ImageStreamTag{
+func MakeImageStreamTagObjectMeta2(namespace, name string) *imagev1.ImageStreamTag {
+	return &imagev1.ImageStreamTag{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
@@ -88,7 +89,7 @@ func MakeImageStreamTagObjectMeta2(namespace, name string) *imageapi.ImageStream
 }
 
 // EnsureImageStreamTagNode adds a graph node for the specific tag in an Image Stream if it does not already exist.
-func EnsureImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *imageapi.ImageStreamTag) *ImageStreamTagNode {
+func EnsureImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *imagev1.ImageStreamTag) *ImageStreamTagNode {
 	return osgraph.EnsureUnique(g,
 		ImageStreamTagNodeName(ist),
 		func(node osgraph.Node) graph.Node {
@@ -98,7 +99,7 @@ func EnsureImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *imageapi.ImageS
 }
 
 // FindOrCreateSyntheticImageStreamTagNode returns the existing ISTNode or creates a synthetic node in its place
-func FindOrCreateSyntheticImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *imageapi.ImageStreamTag) *ImageStreamTagNode {
+func FindOrCreateSyntheticImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *imagev1.ImageStreamTag) *ImageStreamTagNode {
 	return osgraph.EnsureUnique(g,
 		ImageStreamTagNodeName(ist),
 		func(node osgraph.Node) graph.Node {
@@ -109,8 +110,8 @@ func FindOrCreateSyntheticImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *
 
 // MakeImageStreamImageObjectMeta returns an ImageStreamImage that has enough information to join the graph, but it is not
 // based on a full ISI object.  This can be used to properly initialize the graph without having to retrieve all ISIs
-func MakeImageStreamImageObjectMeta(namespace, name string) *imageapi.ImageStreamImage {
-	return &imageapi.ImageStreamImage{
+func MakeImageStreamImageObjectMeta(namespace, name string) *imagev1.ImageStreamImage {
+	return &imagev1.ImageStreamImage{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
@@ -121,7 +122,7 @@ func MakeImageStreamImageObjectMeta(namespace, name string) *imageapi.ImageStrea
 // EnsureImageStreamImageNode adds a graph node for the specific ImageStreamImage if it
 // does not already exist.
 func EnsureImageStreamImageNode(g osgraph.MutableUniqueGraph, namespace, name string) graph.Node {
-	isi := &imageapi.ImageStreamImage{
+	isi := &imagev1.ImageStreamImage{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      name,
@@ -136,7 +137,7 @@ func EnsureImageStreamImageNode(g osgraph.MutableUniqueGraph, namespace, name st
 }
 
 // FindOrCreateSyntheticImageStreamImageNode returns the existing ISINode or creates a synthetic node in its place
-func FindOrCreateSyntheticImageStreamImageNode(g osgraph.MutableUniqueGraph, isi *imageapi.ImageStreamImage) *ImageStreamImageNode {
+func FindOrCreateSyntheticImageStreamImageNode(g osgraph.MutableUniqueGraph, isi *imagev1.ImageStreamImage) *ImageStreamImageNode {
 	return osgraph.EnsureUnique(g,
 		ImageStreamImageNodeName(isi),
 		func(node osgraph.Node) graph.Node {
@@ -146,7 +147,7 @@ func FindOrCreateSyntheticImageStreamImageNode(g osgraph.MutableUniqueGraph, isi
 }
 
 // EnsureImageStreamNode adds a graph node for the Image Stream if it does not already exist.
-func EnsureImageStreamNode(g osgraph.MutableUniqueGraph, is *imageapi.ImageStream) graph.Node {
+func EnsureImageStreamNode(g osgraph.MutableUniqueGraph, is *imagev1.ImageStream) graph.Node {
 	return osgraph.EnsureUnique(g,
 		ImageStreamNodeName(is),
 		func(node osgraph.Node) graph.Node {
@@ -156,7 +157,7 @@ func EnsureImageStreamNode(g osgraph.MutableUniqueGraph, is *imageapi.ImageStrea
 }
 
 // FindOrCreateSyntheticImageStreamNode returns the existing ISNode or creates a synthetic node in its place
-func FindOrCreateSyntheticImageStreamNode(g osgraph.MutableUniqueGraph, is *imageapi.ImageStream) *ImageStreamNode {
+func FindOrCreateSyntheticImageStreamNode(g osgraph.MutableUniqueGraph, is *imagev1.ImageStream) *ImageStreamNode {
 	return osgraph.EnsureUnique(g,
 		ImageStreamNodeName(is),
 		func(node osgraph.Node) graph.Node {
diff --git a/pkg/oc/lib/graph/imagegraph/nodes/types.go b/pkg/oc/lib/graph/imagegraph/nodes/types.go
index 6b26f101cca7..57569fe8952b 100644
--- a/pkg/oc/lib/graph/imagegraph/nodes/types.go
+++ b/pkg/oc/lib/graph/imagegraph/nodes/types.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"reflect"
 
+	imagev1 "github.com/openshift/api/image/v1"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
@@ -19,22 +20,22 @@ const (
 )
 
 var (
-	ImageStreamNodeKind      = reflect.TypeOf(imageapi.ImageStream{}).Name()
-	ImageNodeKind            = reflect.TypeOf(imageapi.Image{}).Name()
-	ImageStreamTagNodeKind   = reflect.TypeOf(imageapi.ImageStreamTag{}).Name()
-	ImageStreamImageNodeKind = reflect.TypeOf(imageapi.ImageStreamImage{}).Name()
+	ImageStreamNodeKind      = reflect.TypeOf(imagev1.ImageStream{}).Name()
+	ImageNodeKind            = reflect.TypeOf(imagev1.Image{}).Name()
+	ImageStreamTagNodeKind   = reflect.TypeOf(imagev1.ImageStreamTag{}).Name()
+	ImageStreamImageNodeKind = reflect.TypeOf(imagev1.ImageStreamImage{}).Name()
 
 	// non-api types
-	DockerRepositoryNodeKind = reflect.TypeOf(imageapi.DockerImageReference{}).Name()
+	DockerRepositoryNodeKind = reflect.TypeOf(imagev1.DockerImageReference{}).Name()
 )
 
-func ImageStreamNodeName(o *imageapi.ImageStream) osgraph.UniqueName {
+func ImageStreamNodeName(o *imagev1.ImageStream) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ImageStreamNodeKind, o)
 }
 
 type ImageStreamNode struct {
 	osgraph.Node
-	*imageapi.ImageStream
+	*imagev1.ImageStream
 
 	IsFound bool
 }
@@ -59,13 +60,13 @@ func (*ImageStreamNode) Kind() string {
 	return ImageStreamNodeKind
 }
 
-func ImageStreamTagNodeName(o *imageapi.ImageStreamTag) osgraph.UniqueName {
+func ImageStreamTagNodeName(o *imagev1.ImageStreamTag) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ImageStreamTagNodeKind, o)
 }
 
 type ImageStreamTagNode struct {
 	osgraph.Node
-	*imageapi.ImageStreamTag
+	*imagev1.ImageStreamTag
 
 	IsFound bool
 }
@@ -100,13 +101,13 @@ func (*ImageStreamTagNode) Kind() string {
 	return ImageStreamTagNodeKind
 }
 
-func ImageStreamImageNodeName(o *imageapi.ImageStreamImage) osgraph.UniqueName {
+func ImageStreamImageNodeName(o *imagev1.ImageStreamImage) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ImageStreamImageNodeKind, o)
 }
 
 type ImageStreamImageNode struct {
 	osgraph.Node
-	*imageapi.ImageStreamImage
+	*imagev1.ImageStreamImage
 
 	IsFound bool
 }
@@ -169,13 +170,13 @@ func (n DockerImageRepositoryNode) UniqueName() osgraph.UniqueName {
 	return DockerImageRepositoryNodeName(n.Ref)
 }
 
-func ImageNodeName(o *imageapi.Image) osgraph.UniqueName {
+func ImageNodeName(o *imagev1.Image) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ImageNodeKind, o)
 }
 
 type ImageNode struct {
 	osgraph.Node
-	Image *imageapi.Image
+	Image *imagev1.Image
 }
 
 func (n ImageNode) Object() interface{} {
diff --git a/pkg/oc/lib/graph/kubegraph/analysis/hpa.go b/pkg/oc/lib/graph/kubegraph/analysis/hpa.go
index 75494cb65224..5f72b88d8ecd 100644
--- a/pkg/oc/lib/graph/kubegraph/analysis/hpa.go
+++ b/pkg/oc/lib/graph/kubegraph/analysis/hpa.go
@@ -4,18 +4,15 @@ import (
 	"fmt"
 	"strings"
 
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-
 	graphapi "github.com/gonum/graph"
 	"github.com/gonum/graph/path"
 
-	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	appsnodes "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	"github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
-	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
 	kubenodes "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
@@ -38,15 +35,7 @@ func FindHPASpecsMissingCPUTargets(graph osgraph.Graph, namer osgraph.Namer) []o
 	for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) {
 		node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode)
 
-		cpuFound := false
-		for _, metric := range node.HorizontalPodAutoscaler.Spec.Metrics {
-			if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == kapi.ResourceCPU {
-				cpuFound = true
-				break
-			}
-		}
-
-		if !cpuFound {
+		if node.HorizontalPodAutoscaler.Spec.TargetCPUUtilizationPercentage == nil {
 			markers = append(markers, osgraph.Marker{
 				Node:       node,
 				Severity:   osgraph.ErrorSeverity,
@@ -128,7 +117,7 @@ func FindOverlappingHPAs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Mar
 	edgeFilter := osgraph.EdgesOfKind(
 		kubegraph.ScalingEdgeKind,
 		appsgraph.DeploymentEdgeKind,
-		kubeedges.ManagedByControllerEdgeKind,
+		appsgraph.ManagedByControllerEdgeKind,
 	)
 
 	hpaSubGraph := graph.Subgraph(nodeFilter, edgeFilter)
diff --git a/pkg/oc/lib/graph/kubegraph/analysis/hpa_test.go b/pkg/oc/lib/graph/kubegraph/analysis/hpa_test.go
index 9c674ccc79ff..adec40220a24 100644
--- a/pkg/oc/lib/graph/kubegraph/analysis/hpa_test.go
+++ b/pkg/oc/lib/graph/kubegraph/analysis/hpa_test.go
@@ -5,8 +5,10 @@ import (
 	"testing"
 
 	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	"k8s.io/apimachinery/pkg/runtime"
+	kubernetesscheme "k8s.io/client-go/kubernetes/scheme"
 
+	"github.com/openshift/api"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	osgraphtest "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph/test"
@@ -64,7 +66,10 @@ func TestOverlappingHPAsWarning(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	kubegraph.AddHPAScaleRefEdges(g, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme))
+	scheme := runtime.NewScheme()
+	kubernetesscheme.AddToScheme(scheme)
+	api.Install(scheme)
+	kubegraph.AddHPAScaleRefEdges(g, testrestmapper.TestOnlyStaticRESTMapper(scheme))
 	appsgraph.AddAllDeploymentConfigsDeploymentEdges(g)
 
 	markers := FindOverlappingHPAs(g, osgraph.DefaultNamer)
@@ -89,7 +94,10 @@ func TestOverlappingLegacyHPAsWarning(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	kubegraph.AddHPAScaleRefEdges(g, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme))
+	scheme := runtime.NewScheme()
+	kubernetesscheme.AddToScheme(scheme)
+	api.Install(scheme)
+	kubegraph.AddHPAScaleRefEdges(g, testrestmapper.TestOnlyStaticRESTMapper(scheme))
 	appsgraph.AddAllDeploymentConfigsDeploymentEdges(g)
 
 	markers := FindOverlappingHPAs(g, osgraph.DefaultNamer)
diff --git a/pkg/oc/lib/graph/kubegraph/analysis/pod.go b/pkg/oc/lib/graph/kubegraph/analysis/pod.go
index 7d2928bd0dc6..e3dfaab7c16d 100644
--- a/pkg/oc/lib/graph/kubegraph/analysis/pod.go
+++ b/pkg/oc/lib/graph/kubegraph/analysis/pod.go
@@ -6,8 +6,8 @@ import (
 
 	"github.com/MakeNowJust/heredoc"
 
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
 
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
@@ -33,7 +33,7 @@ func FindRestartingPods(g osgraph.Graph, f osgraph.Namer, logsCommandName, secur
 
 	for _, uncastPodNode := range g.NodesByKind(kubegraph.PodNodeKind) {
 		podNode := uncastPodNode.(*kubegraph.PodNode)
-		pod, ok := podNode.Object().(*kapi.Pod)
+		pod, ok := podNode.Object().(*corev1.Pod)
 		if !ok {
 			continue
 		}
@@ -102,7 +102,7 @@ func FindRestartingPods(g osgraph.Graph, f osgraph.Namer, logsCommandName, secur
 	return markers
 }
 
-func containerIsNonRoot(pod *kapi.Pod, container string) bool {
+func containerIsNonRoot(pod *corev1.Pod, container string) bool {
 	for _, c := range pod.Spec.Containers {
 		if c.Name != container || c.SecurityContext == nil {
 			continue
@@ -116,11 +116,11 @@ func containerIsNonRoot(pod *kapi.Pod, container string) bool {
 	return false
 }
 
-func containerCrashLoopBackOff(status kapi.ContainerStatus) bool {
+func containerCrashLoopBackOff(status corev1.ContainerStatus) bool {
 	return status.State.Waiting != nil && status.State.Waiting.Reason == "CrashLoopBackOff"
 }
 
-func ContainerRestartedRecently(status kapi.ContainerStatus, now metav1.Time) bool {
+func ContainerRestartedRecently(status corev1.ContainerStatus, now metav1.Time) bool {
 	if status.RestartCount == 0 {
 		return false
 	}
@@ -130,6 +130,6 @@ func ContainerRestartedRecently(status kapi.ContainerStatus, now metav1.Time) bo
 	return false
 }
 
-func containerRestartedFrequently(status kapi.ContainerStatus) bool {
+func containerRestartedFrequently(status corev1.ContainerStatus) bool {
 	return status.RestartCount > RestartThreshold
 }
diff --git a/pkg/oc/lib/graph/kubegraph/analysis/podspec.go b/pkg/oc/lib/graph/kubegraph/analysis/podspec.go
index 8e56afebb408..3e3dcd4753d5 100644
--- a/pkg/oc/lib/graph/kubegraph/analysis/podspec.go
+++ b/pkg/oc/lib/graph/kubegraph/analysis/podspec.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/gonum/graph"
 
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
@@ -144,7 +145,7 @@ func hasControllerOwnerReference(node graph.Node) bool {
 
 // hasControllerRefEdge returns true if a given node contains one or more "ManagedByController" outbound edges
 func hasControllerRefEdge(g osgraph.Graph, node graph.Node) bool {
-	managedEdges := g.OutboundEdges(node, kubeedges.ManagedByControllerEdgeKind)
+	managedEdges := g.OutboundEdges(node, appsgraph.ManagedByControllerEdgeKind)
 	return len(managedEdges) > 0
 }
 
diff --git a/pkg/oc/lib/graph/kubegraph/analysis/rc.go b/pkg/oc/lib/graph/kubegraph/analysis/rc.go
index 108505a98d71..8ecaa2862aba 100644
--- a/pkg/oc/lib/graph/kubegraph/analysis/rc.go
+++ b/pkg/oc/lib/graph/kubegraph/analysis/rc.go
@@ -6,8 +6,8 @@ import (
 
 	"github.com/gonum/graph"
 
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	kubeedges "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 )
 
@@ -21,11 +21,11 @@ func FindDuelingReplicationControllers(g osgraph.Graph, f osgraph.Namer) []osgra
 	for _, uncastRCNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) {
 		rcNode := uncastRCNode.(*kubegraph.ReplicationControllerNode)
 
-		for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) {
+		for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, appsgraph.ManagedByControllerEdgeKind) {
 			podNode := uncastPodNode.(*kubegraph.PodNode)
 
 			// check to see if this pod is managed by more than one RC
-			uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind)
+			uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, appsgraph.ManagedByControllerEdgeKind)
 			if len(uncastOwningRCs) > 1 {
 				involvedRCNames := []string{}
 				relatedNodes := []graph.Node{uncastPodNode}
diff --git a/pkg/oc/lib/graph/kubegraph/edge_test.go b/pkg/oc/lib/graph/kubegraph/edge_test.go
index ed28a67d1b53..2bdcc539b325 100644
--- a/pkg/oc/lib/graph/kubegraph/edge_test.go
+++ b/pkg/oc/lib/graph/kubegraph/edge_test.go
@@ -6,17 +6,17 @@ import (
 
 	"github.com/gonum/graph"
 
+	kappsv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	kapps "k8s.io/kubernetes/pkg/apis/apps"
-	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
+	appsv1 "github.com/openshift/api/apps/v1"
 	_ "github.com/openshift/origin/pkg/apps/apis/apps/install"
 	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
@@ -31,19 +31,19 @@ func TestNamespaceEdgeMatching(t *testing.T) {
 	g := osgraph.New()
 
 	fn := func(namespace string, g osgraph.Interface) {
-		pod := &kapi.Pod{}
+		pod := &corev1.Pod{}
 		pod.Namespace = namespace
 		pod.Name = "the-pod"
 		pod.Labels = map[string]string{"a": "1"}
 		kubegraph.EnsurePodNode(g, pod)
 
-		rc := &kapi.ReplicationController{}
+		rc := &corev1.ReplicationController{}
 		rc.Namespace = namespace
 		rc.Name = "the-rc"
 		rc.Spec.Selector = map[string]string{"a": "1"}
 		kubegraph.EnsureReplicationControllerNode(g, rc)
 
-		p := &kapps.StatefulSet{}
+		p := &kappsv1.StatefulSet{}
 		p.Namespace = namespace
 		p.Name = "the-statefulset"
 		p.Spec.Selector = &metav1.LabelSelector{
@@ -51,7 +51,7 @@ func TestNamespaceEdgeMatching(t *testing.T) {
 		}
 		kubegraph.EnsureStatefulSetNode(g, p)
 
-		svc := &kapi.Service{}
+		svc := &corev1.Service{}
 		svc.Namespace = namespace
 		svc.Name = "the-svc"
 		svc.Spec.Selector = map[string]string{"a": "1"}
@@ -88,13 +88,13 @@ func namespaceFor(node graph.Node) (string, error) {
 			return "", err
 		}
 		return meta.GetNamespace(), nil
-	case *kapi.PodSpec:
+	case *corev1.PodSpec:
 		return node.(*kubegraph.PodSpecNode).Namespace, nil
-	case *kapi.ReplicationControllerSpec:
+	case *corev1.ReplicationControllerSpec:
 		return node.(*kubegraph.ReplicationControllerSpecNode).Namespace, nil
-	case *kapps.StatefulSetSpec:
+	case *kappsv1.StatefulSetSpec:
 		return node.(*kubegraph.StatefulSetSpecNode).Namespace, nil
-	case *kapi.PodTemplateSpec:
+	case *corev1.PodTemplateSpec:
 		return node.(*kubegraph.PodTemplateSpecNode).Namespace, nil
 	default:
 		return "", fmt.Errorf("unknown object: %#v", obj)
@@ -102,19 +102,19 @@ func namespaceFor(node graph.Node) (string, error) {
 }
 
 func TestSecretEdges(t *testing.T) {
-	sa := &kapi.ServiceAccount{}
+	sa := &corev1.ServiceAccount{}
 	sa.Namespace = "ns"
 	sa.Name = "shultz"
-	sa.Secrets = []kapi.ObjectReference{{Name: "i-know-nothing"}, {Name: "missing"}}
+	sa.Secrets = []corev1.ObjectReference{{Name: "i-know-nothing"}, {Name: "missing"}}
 
-	secret1 := &kapi.Secret{}
+	secret1 := &corev1.Secret{}
 	secret1.Namespace = "ns"
 	secret1.Name = "i-know-nothing"
 
-	pod := &kapi.Pod{}
+	pod := &corev1.Pod{}
 	pod.Namespace = "ns"
 	pod.Name = "the-pod"
-	pod.Spec.Volumes = []kapi.Volume{{Name: "rose", VolumeSource: kapi.VolumeSource{Secret: &kapi.SecretVolumeSource{SecretName: "i-know-nothing"}}}}
+	pod.Spec.Volumes = []corev1.Volume{{Name: "rose", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "i-know-nothing"}}}}
 
 	g := osgraph.New()
 	saNode := kubegraph.EnsureServiceAccountNode(g, sa)
@@ -147,17 +147,17 @@ func TestSecretEdges(t *testing.T) {
 }
 
 func TestHPARCEdges(t *testing.T) {
-	hpa := &autoscaling.HorizontalPodAutoscaler{}
+	hpa := &autoscalingv1.HorizontalPodAutoscaler{}
 	hpa.Namespace = "test-ns"
 	hpa.Name = "test-hpa"
-	hpa.Spec = autoscaling.HorizontalPodAutoscalerSpec{
-		ScaleTargetRef: autoscaling.CrossVersionObjectReference{
+	hpa.Spec = autoscalingv1.HorizontalPodAutoscalerSpec{
+		ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
 			Name: "test-rc",
 			Kind: "ReplicationController",
 		},
 	}
 
-	rc := &kapi.ReplicationController{}
+	rc := &corev1.ReplicationController{}
 	rc.Name = "test-rc"
 	rc.Namespace = "test-ns"
 
@@ -177,18 +177,18 @@ func TestHPARCEdges(t *testing.T) {
 }
 
 func TestHPADCEdges(t *testing.T) {
-	hpa := &autoscaling.HorizontalPodAutoscaler{}
+	hpa := &autoscalingv1.HorizontalPodAutoscaler{}
 	hpa.Namespace = "test-ns"
 	hpa.Name = "test-hpa"
-	hpa.Spec = autoscaling.HorizontalPodAutoscalerSpec{
-		ScaleTargetRef: autoscaling.CrossVersionObjectReference{
+	hpa.Spec = autoscalingv1.HorizontalPodAutoscalerSpec{
+		ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
 			Name:       "test-dc",
 			Kind:       "DeploymentConfig",
 			APIVersion: "apps.openshift.io/v1",
 		},
 	}
 
-	dc := &appsapi.DeploymentConfig{}
+	dc := &appsv1.DeploymentConfig{}
 	dc.Name = "test-dc"
 	dc.Namespace = "test-ns"
 
diff --git a/pkg/oc/lib/graph/kubegraph/edges.go b/pkg/oc/lib/graph/kubegraph/edges.go
index b9e468c121e1..5b79e6ff44a4 100644
--- a/pkg/oc/lib/graph/kubegraph/edges.go
+++ b/pkg/oc/lib/graph/kubegraph/edges.go
@@ -6,21 +6,22 @@ import (
 
 	"github.com/gonum/graph"
 
+	kappsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/apis/extensions"
 
 	oapps "github.com/openshift/api/apps"
+	appsv1 "github.com/openshift/api/apps/v1"
 	"github.com/openshift/origin/pkg/api/legacy"
-	appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	triggerapi "github.com/openshift/origin/pkg/image/apis/image/v1/trigger"
 	"github.com/openshift/origin/pkg/image/trigger/annotations"
-	appsgraph "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
+	"github.com/openshift/origin/pkg/oc/lib/graph/appsgraph"
+	appsnodes "github.com/openshift/origin/pkg/oc/lib/graph/appsgraph/nodes"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	imagegraph "github.com/openshift/origin/pkg/oc/lib/graph/imagegraph/nodes"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
@@ -29,8 +30,6 @@ import (
 const (
 	// ExposedThroughServiceEdgeKind goes from a PodTemplateSpec or a Pod to Service.  The head should make the service's selector.
 	ExposedThroughServiceEdgeKind = "ExposedThroughService"
-	// ManagedByControllerEdgeKind goes from Pod to controller when the Pod satisfies a controller's label selector
-	ManagedByControllerEdgeKind = "ManagedByController"
 	// MountedSecretEdgeKind goes from PodSpec to Secret indicating that is or will be a request to mount a volume with the Secret.
 	MountedSecretEdgeKind = "MountedSecret"
 	// MountableSecretEdgeKind goes from ServiceAccount to Secret indicating that the SA allows the Secret to be mounted
@@ -120,7 +119,7 @@ func AddManagedByControllerPodEdges(g osgraph.MutableUniqueGraph, to graph.Node,
 				continue
 			}
 			if query.Matches(labels.Set(target.Labels)) {
-				g.AddEdge(target, to, ManagedByControllerEdgeKind)
+				g.AddEdge(target, to, appsgraph.ManagedByControllerEdgeKind)
 			}
 		}
 	}
@@ -134,12 +133,23 @@ func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) {
 		case *kubegraph.ReplicationControllerNode:
 			AddManagedByControllerPodEdges(g, cast, cast.ReplicationController.Namespace, cast.ReplicationController.Spec.Selector)
 		case *kubegraph.ReplicaSetNode:
-			AddManagedByControllerPodEdges(g, cast, cast.ReplicaSet.Namespace, cast.ReplicaSet.Spec.Selector.MatchLabels)
+			selector := make(map[string]string)
+			if cast.ReplicaSet.Spec.Selector != nil {
+				selector = cast.ReplicaSet.Spec.Selector.MatchLabels
+			}
+			AddManagedByControllerPodEdges(g, cast, cast.ReplicaSet.Namespace, selector)
 		case *kubegraph.StatefulSetNode:
-			// TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments)
-			AddManagedByControllerPodEdges(g, cast, cast.StatefulSet.Namespace, cast.StatefulSet.Spec.Selector.MatchLabels)
+			selector := make(map[string]string)
+			if cast.StatefulSet.Spec.Selector != nil {
+				selector = cast.StatefulSet.Spec.Selector.MatchLabels
+			}
+			AddManagedByControllerPodEdges(g, cast, cast.StatefulSet.Namespace, selector)
 		case *kubegraph.DaemonSetNode:
-			AddManagedByControllerPodEdges(g, cast, cast.DaemonSet.Namespace, cast.DaemonSet.Spec.Selector.MatchLabels)
+			selector := make(map[string]string)
+			if cast.DaemonSet.Spec.Selector != nil {
+				selector = cast.DaemonSet.Spec.Selector.MatchLabels
+			}
+			AddManagedByControllerPodEdges(g, cast, cast.DaemonSet.Namespace, selector)
 		}
 	}
 }
@@ -162,7 +172,7 @@ func AddMountedSecretEdges(g osgraph.Graph, podSpec *kubegraph.PodSpecNode) {
 		}
 
 		// pod secrets must be in the same namespace
-		syntheticSecret := &kapi.Secret{}
+		syntheticSecret := &corev1.Secret{}
 		syntheticSecret.Namespace = meta.GetNamespace()
 		syntheticSecret.Name = source.Secret.SecretName
 
@@ -181,7 +191,7 @@ func AddAllMountedSecretEdges(g osgraph.Graph) {
 
 func AddMountableSecretEdges(g osgraph.Graph, saNode *kubegraph.ServiceAccountNode) {
 	for _, mountableSecret := range saNode.ServiceAccount.Secrets {
-		syntheticSecret := &kapi.Secret{}
+		syntheticSecret := &corev1.Secret{}
 		syntheticSecret.Namespace = saNode.ServiceAccount.Namespace
 		syntheticSecret.Name = mountableSecret.Name
 
@@ -214,7 +224,7 @@ func AddRequestedServiceAccountEdges(g osgraph.Graph, podSpecNode *kubegraph.Pod
 		name = podSpecNode.ServiceAccountName
 	}
 
-	syntheticSA := &kapi.ServiceAccount{}
+	syntheticSA := &corev1.ServiceAccount{}
 	syntheticSA.Namespace = meta.GetNamespace()
 	syntheticSA.Name = name
 
@@ -255,16 +265,16 @@ func AddHPAScaleRefEdges(g osgraph.Graph, restMapper meta.RESTMapper) {
 		var syntheticNode graph.Node
 		r := groupVersionResource.GroupResource()
 		switch r {
-		case kapi.Resource("replicationcontrollers"):
-			syntheticNode = kubegraph.FindOrCreateSyntheticReplicationControllerNode(g, &kapi.ReplicationController{ObjectMeta: syntheticMeta})
+		case corev1.Resource("replicationcontrollers"):
+			syntheticNode = kubegraph.FindOrCreateSyntheticReplicationControllerNode(g, &corev1.ReplicationController{ObjectMeta: syntheticMeta})
 		case oapps.Resource("deploymentconfigs"),
 			// we need the legacy resource until we stop supporting HPA having old refs
 			legacy.Resource("deploymentconfigs"):
-			syntheticNode = appsgraph.FindOrCreateSyntheticDeploymentConfigNode(g, &appsapi.DeploymentConfig{ObjectMeta: syntheticMeta})
-		case extensions.Resource("deployments"):
-			syntheticNode = kubegraph.FindOrCreateSyntheticDeploymentNode(g, &extensions.Deployment{ObjectMeta: syntheticMeta})
-		case extensions.Resource("replicasets"):
-			syntheticNode = kubegraph.FindOrCreateSyntheticReplicaSetNode(g, &extensions.ReplicaSet{ObjectMeta: syntheticMeta})
+			syntheticNode = appsnodes.FindOrCreateSyntheticDeploymentConfigNode(g, &appsv1.DeploymentConfig{ObjectMeta: syntheticMeta})
+		case kappsv1.Resource("deployments"):
+			syntheticNode = kubegraph.FindOrCreateSyntheticDeploymentNode(g, &kappsv1.Deployment{ObjectMeta: syntheticMeta})
+		case kappsv1.Resource("replicasets"):
+			syntheticNode = kubegraph.FindOrCreateSyntheticReplicaSetNode(g, &kappsv1.ReplicaSet{ObjectMeta: syntheticMeta})
 		default:
 			continue
 		}
@@ -273,7 +283,7 @@ func AddHPAScaleRefEdges(g osgraph.Graph, restMapper meta.RESTMapper) {
 	}
 }
 
-func addTriggerEdges(obj runtime.Object, podTemplate kapi.PodTemplateSpec, addEdgeFn func(image appsapi.TemplateImage, err error)) {
+func addTriggerEdges(obj runtime.Object, podTemplate corev1.PodTemplateSpec, addEdgeFn func(image appsgraph.TemplateImage, err error)) {
 	m, err := meta.Accessor(obj)
 	if err != nil {
 		return
@@ -286,8 +296,8 @@ func addTriggerEdges(obj runtime.Object, podTemplate kapi.PodTemplateSpec, addEd
 	if err := json.Unmarshal([]byte(triggerAnnotation), &triggers); err != nil {
 		return
 	}
-	triggerFn := func(container *kapi.Container) (appsapi.TemplateImage, bool) {
-		from := kapi.ObjectReference{}
+	triggerFn := func(container *corev1.Container) (appsgraph.TemplateImage, bool) {
+		from := corev1.ObjectReference{}
 		for _, trigger := range triggers {
 			c, remainder, err := annotations.ContainerForObjectFieldPath(obj, trigger.FieldPath)
 			if err != nil || remainder != "image" {
@@ -302,18 +312,18 @@ func addTriggerEdges(obj runtime.Object, podTemplate kapi.PodTemplateSpec, addEd
 			if len(from.Kind) == 0 {
 				from.Kind = "ImageStreamTag"
 			}
-			return appsapi.TemplateImage{
+			return appsgraph.TemplateImage{
 				Image: c.GetImage(),
 				From:  &from,
 			}, true
 		}
-		return appsapi.TemplateImage{}, false
+		return appsgraph.TemplateImage{}, false
 	}
-	appsapi.EachTemplateImage(&podTemplate.Spec, triggerFn, addEdgeFn)
+	appsgraph.EachTemplateImage(&podTemplate.Spec, triggerFn, addEdgeFn)
 }
 
 func AddTriggerStatefulSetsEdges(g osgraph.MutableUniqueGraph, node *kubegraph.StatefulSetNode) *kubegraph.StatefulSetNode {
-	addTriggerEdges(node.StatefulSet, node.StatefulSet.Spec.Template, func(image appsapi.TemplateImage, err error) {
+	addTriggerEdges(node.StatefulSet, node.StatefulSet.Spec.Template, func(image appsgraph.TemplateImage, err error) {
 		if err != nil {
 			return
 		}
@@ -344,7 +354,7 @@ func AddAllTriggerStatefulSetsEdges(g osgraph.MutableUniqueGraph) {
 }
 
 func AddTriggerDeploymentsEdges(g osgraph.MutableUniqueGraph, node *kubegraph.DeploymentNode) *kubegraph.DeploymentNode {
-	addTriggerEdges(node.Deployment, node.Deployment.Spec.Template, func(image appsapi.TemplateImage, err error) {
+	addTriggerEdges(node.Deployment, node.Deployment.Spec.Template, func(image appsgraph.TemplateImage, err error) {
 		if err != nil {
 			return
 		}
@@ -381,7 +391,7 @@ func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *kubegraph.Deployment
 			}
 			if BelongsToDeployment(node.Deployment, rsNode.ReplicaSet) {
 				g.AddEdge(node, rsNode, DeploymentEdgeKind)
-				g.AddEdge(rsNode, node, ManagedByControllerEdgeKind)
+				g.AddEdge(rsNode, node, appsgraph.ManagedByControllerEdgeKind)
 			}
 		}
 	}
@@ -389,7 +399,7 @@ func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *kubegraph.Deployment
 	return node
 }
 
-func BelongsToDeployment(config *extensions.Deployment, b *extensions.ReplicaSet) bool {
+func BelongsToDeployment(config *kappsv1.Deployment, b *kappsv1.ReplicaSet) bool {
 	if b.OwnerReferences == nil {
 		return false
 	}
diff --git a/pkg/oc/lib/graph/kubegraph/nodes/nodes.go b/pkg/oc/lib/graph/kubegraph/nodes/nodes.go
index 69ae5fa99c18..4ff50c94d098 100644
--- a/pkg/oc/lib/graph/kubegraph/nodes/nodes.go
+++ b/pkg/oc/lib/graph/kubegraph/nodes/nodes.go
@@ -2,16 +2,15 @@ package nodes
 
 import (
 	"github.com/gonum/graph"
-	"k8s.io/kubernetes/pkg/apis/extensions"
 
-	kapps "k8s.io/kubernetes/pkg/apis/apps"
-	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	kappsv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	corev1 "k8s.io/api/core/v1"
 
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
 
-func EnsurePodNode(g osgraph.MutableUniqueGraph, pod *kapi.Pod) *PodNode {
+func EnsurePodNode(g osgraph.MutableUniqueGraph, pod *corev1.Pod) *PodNode {
 	podNodeName := PodNodeName(pod)
 	podNode := osgraph.EnsureUnique(g,
 		podNodeName,
@@ -26,7 +25,7 @@ func EnsurePodNode(g osgraph.MutableUniqueGraph, pod *kapi.Pod) *PodNode {
 	return podNode
 }
 
-func EnsurePodSpecNode(g osgraph.MutableUniqueGraph, podSpec *kapi.PodSpec, namespace string, ownerName osgraph.UniqueName) *PodSpecNode {
+func EnsurePodSpecNode(g osgraph.MutableUniqueGraph, podSpec *corev1.PodSpec, namespace string, ownerName osgraph.UniqueName) *PodSpecNode {
 	return osgraph.EnsureUnique(g,
 		PodSpecNodeName(podSpec, ownerName),
 		func(node osgraph.Node) graph.Node {
@@ -36,7 +35,7 @@ func EnsurePodSpecNode(g osgraph.MutableUniqueGraph, podSpec *kapi.PodSpec, name
 }
 
 // EnsureServiceNode adds the provided service to the graph if it does not already exist.
-func EnsureServiceNode(g osgraph.MutableUniqueGraph, svc *kapi.Service) *ServiceNode {
+func EnsureServiceNode(g osgraph.MutableUniqueGraph, svc *corev1.Service) *ServiceNode {
 	return osgraph.EnsureUnique(g,
 		ServiceNodeName(svc),
 		func(node osgraph.Node) graph.Node {
@@ -46,7 +45,7 @@ func EnsureServiceNode(g osgraph.MutableUniqueGraph, svc *kapi.Service) *Service
 }
 
 // FindOrCreateSyntheticServiceNode returns the existing service node or creates a synthetic node in its place
-func FindOrCreateSyntheticServiceNode(g osgraph.MutableUniqueGraph, svc *kapi.Service) *ServiceNode {
+func FindOrCreateSyntheticServiceNode(g osgraph.MutableUniqueGraph, svc *corev1.Service) *ServiceNode {
 	return osgraph.EnsureUnique(g,
 		ServiceNodeName(svc),
 		func(node osgraph.Node) graph.Node {
@@ -55,7 +54,7 @@ func FindOrCreateSyntheticServiceNode(g osgraph.MutableUniqueGraph, svc *kapi.Se
 	).(*ServiceNode)
 }
 
-func EnsureServiceAccountNode(g osgraph.MutableUniqueGraph, o *kapi.ServiceAccount) *ServiceAccountNode {
+func EnsureServiceAccountNode(g osgraph.MutableUniqueGraph, o *corev1.ServiceAccount) *ServiceAccountNode {
 	return osgraph.EnsureUnique(g,
 		ServiceAccountNodeName(o),
 		func(node osgraph.Node) graph.Node {
@@ -64,7 +63,7 @@ func EnsureServiceAccountNode(g osgraph.MutableUniqueGraph, o *kapi.ServiceAccou
 	).(*ServiceAccountNode)
 }
 
-func FindOrCreateSyntheticServiceAccountNode(g osgraph.MutableUniqueGraph, o *kapi.ServiceAccount) *ServiceAccountNode {
+func FindOrCreateSyntheticServiceAccountNode(g osgraph.MutableUniqueGraph, o *corev1.ServiceAccount) *ServiceAccountNode {
 	return osgraph.EnsureUnique(g,
 		ServiceAccountNodeName(o),
 		func(node osgraph.Node) graph.Node {
@@ -73,7 +72,7 @@ func FindOrCreateSyntheticServiceAccountNode(g osgraph.MutableUniqueGraph, o *ka
 	).(*ServiceAccountNode)
 }
 
-func EnsureSecretNode(g osgraph.MutableUniqueGraph, o *kapi.Secret) *SecretNode {
+func EnsureSecretNode(g osgraph.MutableUniqueGraph, o *corev1.Secret) *SecretNode {
 	return osgraph.EnsureUnique(g,
 		SecretNodeName(o),
 		func(node osgraph.Node) graph.Node {
@@ -86,7 +85,7 @@ func EnsureSecretNode(g osgraph.MutableUniqueGraph, o *kapi.Secret) *SecretNode
 	).(*SecretNode)
 }
 
-func FindOrCreateSyntheticSecretNode(g osgraph.MutableUniqueGraph, o *kapi.Secret) *SecretNode {
+func FindOrCreateSyntheticSecretNode(g osgraph.MutableUniqueGraph, o *corev1.Secret) *SecretNode {
 	return osgraph.EnsureUnique(g,
 		SecretNodeName(o),
 		func(node osgraph.Node) graph.Node {
@@ -100,7 +99,7 @@ func FindOrCreateSyntheticSecretNode(g osgraph.MutableUniqueGraph, o *kapi.Secre
 }
 
 // EnsureReplicationControllerNode adds a graph node for the ReplicationController if it does not already exist.
-func EnsureReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *kapi.ReplicationController) *ReplicationControllerNode {
+func EnsureReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *corev1.ReplicationController) *ReplicationControllerNode {
 	rcNodeName := ReplicationControllerNodeName(rc)
 	rcNode := osgraph.EnsureUnique(g,
 		rcNodeName,
@@ -116,7 +115,7 @@ func EnsureReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *kapi.Repl
 }
 
 // EnsureReplicaSetNode adds a graph node for the ReplicaSet if it does not already exist.
-func EnsureReplicaSetNode(g osgraph.MutableUniqueGraph, rs *extensions.ReplicaSet) *ReplicaSetNode {
+func EnsureReplicaSetNode(g osgraph.MutableUniqueGraph, rs *kappsv1.ReplicaSet) *ReplicaSetNode {
 	rsNodeName := ReplicaSetNodeName(rs)
 	rsNode := osgraph.EnsureUnique(g,
 		rsNodeName,
@@ -131,7 +130,7 @@ func EnsureReplicaSetNode(g osgraph.MutableUniqueGraph, rs *extensions.ReplicaSe
 	return rsNode
 }
 
-func EnsureReplicaSetSpecNode(g osgraph.MutableUniqueGraph, rsSpec *extensions.ReplicaSetSpec, namespace string, ownerName osgraph.UniqueName) *ReplicaSetSpecNode {
+func EnsureReplicaSetSpecNode(g osgraph.MutableUniqueGraph, rsSpec *kappsv1.ReplicaSetSpec, namespace string, ownerName osgraph.UniqueName) *ReplicaSetSpecNode {
 	rsSpecName := ReplicaSetSpecNodeName(rsSpec, ownerName)
 	rsSpecNode := osgraph.EnsureUnique(g,
 		rsSpecName,
@@ -146,7 +145,7 @@ func EnsureReplicaSetSpecNode(g osgraph.MutableUniqueGraph, rsSpec *extensions.R
 	return rsSpecNode
 }
 
-func FindOrCreateSyntheticReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *kapi.ReplicationController) *ReplicationControllerNode {
+func FindOrCreateSyntheticReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *corev1.ReplicationController) *ReplicationControllerNode {
 	return osgraph.EnsureUnique(g,
 		ReplicationControllerNodeName(rc),
 		func(node osgraph.Node) graph.Node {
@@ -155,7 +154,7 @@ func FindOrCreateSyntheticReplicationControllerNode(g osgraph.MutableUniqueGraph
 	).(*ReplicationControllerNode)
 }
 
-func FindOrCreateSyntheticDeploymentNode(g osgraph.MutableUniqueGraph, deployment *extensions.Deployment) *DeploymentNode {
+func FindOrCreateSyntheticDeploymentNode(g osgraph.MutableUniqueGraph, deployment *kappsv1.Deployment) *DeploymentNode {
 	return osgraph.EnsureUnique(
 		g,
 		DeploymentNodeName(deployment),
@@ -165,7 +164,7 @@ func FindOrCreateSyntheticDeploymentNode(g osgraph.MutableUniqueGraph, deploymen
 	).(*DeploymentNode)
 }
 
-func EnsureReplicationControllerSpecNode(g osgraph.MutableUniqueGraph, rcSpec *kapi.ReplicationControllerSpec, namespace string, ownerName osgraph.UniqueName) *ReplicationControllerSpecNode {
+func EnsureReplicationControllerSpecNode(g osgraph.MutableUniqueGraph, rcSpec *corev1.ReplicationControllerSpec, namespace string, ownerName osgraph.UniqueName) *ReplicationControllerSpecNode {
 	rcSpecName := ReplicationControllerSpecNodeName(rcSpec, ownerName)
 	rcSpecNode := osgraph.EnsureUnique(g,
 		rcSpecName,
@@ -182,7 +181,7 @@ func EnsureReplicationControllerSpecNode(g osgraph.MutableUniqueGraph, rcSpec *k
 	return rcSpecNode
 }
 
-func EnsurePodTemplateSpecNode(g osgraph.MutableUniqueGraph, ptSpec *kapi.PodTemplateSpec, namespace string, ownerName osgraph.UniqueName) *PodTemplateSpecNode {
+func EnsurePodTemplateSpecNode(g osgraph.MutableUniqueGraph, ptSpec *corev1.PodTemplateSpec, namespace string, ownerName osgraph.UniqueName) *PodTemplateSpecNode {
 	ptSpecName := PodTemplateSpecNodeName(ptSpec, ownerName)
 	ptSpecNode := osgraph.EnsureUnique(g,
 		ptSpecName,
@@ -197,7 +196,7 @@ func EnsurePodTemplateSpecNode(g osgraph.MutableUniqueGraph, ptSpec *kapi.PodTem
 	return ptSpecNode
 }
 
-func EnsurePersistentVolumeClaimNode(g osgraph.MutableUniqueGraph, pvc *kapi.PersistentVolumeClaim) *PersistentVolumeClaimNode {
+func EnsurePersistentVolumeClaimNode(g osgraph.MutableUniqueGraph, pvc *corev1.PersistentVolumeClaim) *PersistentVolumeClaimNode {
 	return osgraph.EnsureUnique(g,
 		PersistentVolumeClaimNodeName(pvc),
 		func(node osgraph.Node) graph.Node {
@@ -206,7 +205,7 @@ func EnsurePersistentVolumeClaimNode(g osgraph.MutableUniqueGraph, pvc *kapi.Per
 	).(*PersistentVolumeClaimNode)
 }
 
-func FindOrCreateSyntheticPVCNode(g osgraph.MutableUniqueGraph, pvc *kapi.PersistentVolumeClaim) *PersistentVolumeClaimNode {
+func FindOrCreateSyntheticPVCNode(g osgraph.MutableUniqueGraph, pvc *corev1.PersistentVolumeClaim) *PersistentVolumeClaimNode {
 	return osgraph.EnsureUnique(g,
 		PersistentVolumeClaimNodeName(pvc),
 		func(node osgraph.Node) graph.Node {
@@ -215,7 +214,7 @@ func FindOrCreateSyntheticPVCNode(g osgraph.MutableUniqueGraph, pvc *kapi.Persis
 	).(*PersistentVolumeClaimNode)
 }
 
-func EnsureHorizontalPodAutoscalerNode(g osgraph.MutableUniqueGraph, hpa *autoscaling.HorizontalPodAutoscaler) *HorizontalPodAutoscalerNode {
+func EnsureHorizontalPodAutoscalerNode(g osgraph.MutableUniqueGraph, hpa *autoscalingv1.HorizontalPodAutoscaler) *HorizontalPodAutoscalerNode {
 	return osgraph.EnsureUnique(g,
 		HorizontalPodAutoscalerNodeName(hpa),
 		func(node osgraph.Node) graph.Node {
@@ -224,7 +223,7 @@ func EnsureHorizontalPodAutoscalerNode(g osgraph.MutableUniqueGraph, hpa *autosc
 	).(*HorizontalPodAutoscalerNode)
 }
 
-func EnsureStatefulSetNode(g osgraph.MutableUniqueGraph, statefulSet *kapps.StatefulSet) *StatefulSetNode {
+func EnsureStatefulSetNode(g osgraph.MutableUniqueGraph, statefulSet *kappsv1.StatefulSet) *StatefulSetNode {
 	nodeName := StatefulSetNodeName(statefulSet)
 	node := osgraph.EnsureUnique(g,
 		nodeName,
@@ -239,7 +238,7 @@ func EnsureStatefulSetNode(g osgraph.MutableUniqueGraph, statefulSet *kapps.Stat
 	return node
 }
 
-func EnsureStatefulSetSpecNode(g osgraph.MutableUniqueGraph, spec *kapps.StatefulSetSpec, namespace string, ownerName osgraph.UniqueName) *StatefulSetSpecNode {
+func EnsureStatefulSetSpecNode(g osgraph.MutableUniqueGraph, spec *kappsv1.StatefulSetSpec, namespace string, ownerName osgraph.UniqueName) *StatefulSetSpecNode {
 	specName := StatefulSetSpecNodeName(spec, ownerName)
 	specNode := osgraph.EnsureUnique(g,
 		specName,
@@ -254,7 +253,7 @@ func EnsureStatefulSetSpecNode(g osgraph.MutableUniqueGraph, spec *kapps.Statefu
 	return specNode
 }
 
-func EnsureDeploymentNode(g osgraph.MutableUniqueGraph, deployment *extensions.Deployment) *DeploymentNode {
+func EnsureDeploymentNode(g osgraph.MutableUniqueGraph, deployment *kappsv1.Deployment) *DeploymentNode {
 	nodeName := DeploymentNodeName(deployment)
 	node := osgraph.EnsureUnique(g,
 		nodeName,
@@ -269,7 +268,7 @@ func EnsureDeploymentNode(g osgraph.MutableUniqueGraph, deployment *extensions.D
 	return node
 }
 
-func EnsureDeploymentSpecNode(g osgraph.MutableUniqueGraph, spec *extensions.DeploymentSpec, namespace string, ownerName osgraph.UniqueName) *DeploymentSpecNode {
+func EnsureDeploymentSpecNode(g osgraph.MutableUniqueGraph, spec *kappsv1.DeploymentSpec, namespace string, ownerName osgraph.UniqueName) *DeploymentSpecNode {
 	specName := DeploymentSpecNodeName(spec, ownerName)
 	specNode := osgraph.EnsureUnique(g,
 		specName,
@@ -285,7 +284,7 @@ func EnsureDeploymentSpecNode(g osgraph.MutableUniqueGraph, spec *extensions.Dep
 }
 
 // EnsureDaemonSetNode adds the provided daemon set to the graph if it does not exist
-func EnsureDaemonSetNode(g osgraph.MutableUniqueGraph, ds *extensions.DaemonSet) *DaemonSetNode {
+func EnsureDaemonSetNode(g osgraph.MutableUniqueGraph, ds *kappsv1.DaemonSet) *DaemonSetNode {
 	dsName := DaemonSetNodeName(ds)
 	dsNode := osgraph.EnsureUnique(
 		g,
@@ -301,7 +300,7 @@ func EnsureDaemonSetNode(g osgraph.MutableUniqueGraph, ds *extensions.DaemonSet)
 	return dsNode
 }
 
-func FindOrCreateSyntheticDaemonSetNode(g osgraph.MutableUniqueGraph, ds *extensions.DaemonSet) *DaemonSetNode {
+func FindOrCreateSyntheticDaemonSetNode(g osgraph.MutableUniqueGraph, ds *kappsv1.DaemonSet) *DaemonSetNode {
 	return osgraph.EnsureUnique(
 		g,
 		DaemonSetNodeName(ds),
@@ -311,7 +310,7 @@ func FindOrCreateSyntheticDaemonSetNode(g osgraph.MutableUniqueGraph, ds *extens
 	).(*DaemonSetNode)
 }
 
-func FindOrCreateSyntheticReplicaSetNode(g osgraph.MutableUniqueGraph, rs *extensions.ReplicaSet) *ReplicaSetNode {
+func FindOrCreateSyntheticReplicaSetNode(g osgraph.MutableUniqueGraph, rs *kappsv1.ReplicaSet) *ReplicaSetNode {
 	return osgraph.EnsureUnique(
 		g,
 		ReplicaSetNodeName(rs),
diff --git a/pkg/oc/lib/graph/kubegraph/nodes/nodes_test.go b/pkg/oc/lib/graph/kubegraph/nodes/nodes_test.go
index a7ff797ce30a..8c52e46357ad 100644
--- a/pkg/oc/lib/graph/kubegraph/nodes/nodes_test.go
+++ b/pkg/oc/lib/graph/kubegraph/nodes/nodes_test.go
@@ -3,7 +3,7 @@ package nodes
 import (
 	"testing"
 
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
@@ -11,7 +11,7 @@ import (
 func TestPodSpecNode(t *testing.T) {
 	g := osgraph.New()
 
-	pod := &kapi.Pod{}
+	pod := &corev1.Pod{}
 	pod.Namespace = "ns"
 	pod.Name = "foo"
 	pod.Spec.NodeName = "any-host"
@@ -38,10 +38,10 @@ func TestPodSpecNode(t *testing.T) {
 func TestReplicationControllerSpecNode(t *testing.T) {
 	g := osgraph.New()
 
-	rc := &kapi.ReplicationController{}
+	rc := &corev1.ReplicationController{}
 	rc.Namespace = "ns"
 	rc.Name = "foo"
-	rc.Spec.Template = &kapi.PodTemplateSpec{}
+	rc.Spec.Template = &corev1.PodTemplateSpec{}
 
 	rcNode := EnsureReplicationControllerNode(g, rc)
 
diff --git a/pkg/oc/lib/graph/kubegraph/nodes/types.go b/pkg/oc/lib/graph/kubegraph/nodes/types.go
index d4c403598f1f..af5b7907e21e 100644
--- a/pkg/oc/lib/graph/kubegraph/nodes/types.go
+++ b/pkg/oc/lib/graph/kubegraph/nodes/types.go
@@ -4,41 +4,40 @@ import (
 	"fmt"
 	"reflect"
 
-	kapps "k8s.io/kubernetes/pkg/apis/apps"
-	"k8s.io/kubernetes/pkg/apis/autoscaling"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	kappsv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	corev1 "k8s.io/api/core/v1"
 
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 )
 
 var (
-	ServiceNodeKind                   = reflect.TypeOf(kapi.Service{}).Name()
-	PodNodeKind                       = reflect.TypeOf(kapi.Pod{}).Name()
-	PodSpecNodeKind                   = reflect.TypeOf(kapi.PodSpec{}).Name()
-	PodTemplateSpecNodeKind           = reflect.TypeOf(kapi.PodTemplateSpec{}).Name()
-	ReplicationControllerNodeKind     = reflect.TypeOf(kapi.ReplicationController{}).Name()
-	ReplicationControllerSpecNodeKind = reflect.TypeOf(kapi.ReplicationControllerSpec{}).Name()
-	ServiceAccountNodeKind            = reflect.TypeOf(kapi.ServiceAccount{}).Name()
-	SecretNodeKind                    = reflect.TypeOf(kapi.Secret{}).Name()
-	PersistentVolumeClaimNodeKind     = reflect.TypeOf(kapi.PersistentVolumeClaim{}).Name()
-	HorizontalPodAutoscalerNodeKind   = reflect.TypeOf(autoscaling.HorizontalPodAutoscaler{}).Name()
-	StatefulSetNodeKind               = reflect.TypeOf(kapps.StatefulSet{}).Name()
-	StatefulSetSpecNodeKind           = reflect.TypeOf(kapps.StatefulSetSpec{}).Name()
-	DeploymentNodeKind                = reflect.TypeOf(extensions.Deployment{}).Name()
-	DeploymentSpecNodeKind            = reflect.TypeOf(extensions.DeploymentSpec{}).Name()
-	ReplicaSetNodeKind                = reflect.TypeOf(extensions.ReplicaSet{}).Name()
-	ReplicaSetSpecNodeKind            = reflect.TypeOf(extensions.ReplicaSetSpec{}).Name()
-	DaemonSetNodeKind                 = reflect.TypeOf(extensions.DaemonSet{}).Name()
+	ServiceNodeKind                   = reflect.TypeOf(corev1.Service{}).Name()
+	PodNodeKind                       = reflect.TypeOf(corev1.Pod{}).Name()
+	PodSpecNodeKind                   = reflect.TypeOf(corev1.PodSpec{}).Name()
+	PodTemplateSpecNodeKind           = reflect.TypeOf(corev1.PodTemplateSpec{}).Name()
+	ReplicationControllerNodeKind     = reflect.TypeOf(corev1.ReplicationController{}).Name()
+	ReplicationControllerSpecNodeKind = reflect.TypeOf(corev1.ReplicationControllerSpec{}).Name()
+	ServiceAccountNodeKind            = reflect.TypeOf(corev1.ServiceAccount{}).Name()
+	SecretNodeKind                    = reflect.TypeOf(corev1.Secret{}).Name()
+	PersistentVolumeClaimNodeKind     = reflect.TypeOf(corev1.PersistentVolumeClaim{}).Name()
+	HorizontalPodAutoscalerNodeKind   = reflect.TypeOf(autoscalingv1.HorizontalPodAutoscaler{}).Name()
+	StatefulSetNodeKind               = reflect.TypeOf(kappsv1.StatefulSet{}).Name()
+	StatefulSetSpecNodeKind           = reflect.TypeOf(kappsv1.StatefulSetSpec{}).Name()
+	DeploymentNodeKind                = reflect.TypeOf(kappsv1.Deployment{}).Name()
+	DeploymentSpecNodeKind            = reflect.TypeOf(kappsv1.DeploymentSpec{}).Name()
+	ReplicaSetNodeKind                = reflect.TypeOf(kappsv1.ReplicaSet{}).Name()
+	ReplicaSetSpecNodeKind            = reflect.TypeOf(kappsv1.ReplicaSetSpec{}).Name()
+	DaemonSetNodeKind                 = reflect.TypeOf(kappsv1.DaemonSet{}).Name()
 )
 
-func ServiceNodeName(o *kapi.Service) osgraph.UniqueName {
+func ServiceNodeName(o *corev1.Service) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ServiceNodeKind, o)
 }
 
 type ServiceNode struct {
 	osgraph.Node
-	*kapi.Service
+	*corev1.Service
 
 	IsFound bool
 }
@@ -59,13 +58,13 @@ func (n ServiceNode) Found() bool {
 	return n.IsFound
 }
 
-func PodNodeName(o *kapi.Pod) osgraph.UniqueName {
+func PodNodeName(o *corev1.Pod) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(PodNodeKind, o)
 }
 
 type PodNode struct {
 	osgraph.Node
-	*kapi.Pod
+	*corev1.Pod
 }
 
 func (n PodNode) Object() interface{} {
@@ -84,13 +83,13 @@ func (*PodNode) Kind() string {
 	return PodNodeKind
 }
 
-func PodSpecNodeName(o *kapi.PodSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
+func PodSpecNodeName(o *corev1.PodSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
 	return osgraph.UniqueName(fmt.Sprintf("%s|%v", PodSpecNodeKind, ownerName))
 }
 
 type PodSpecNode struct {
 	osgraph.Node
-	*kapi.PodSpec
+	*corev1.PodSpec
 	Namespace string
 
 	OwnerName osgraph.UniqueName
@@ -112,13 +111,13 @@ func (*PodSpecNode) Kind() string {
 	return PodSpecNodeKind
 }
 
-func ReplicaSetNodeName(o *extensions.ReplicaSet) osgraph.UniqueName {
+func ReplicaSetNodeName(o *kappsv1.ReplicaSet) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ReplicaSetNodeKind, o)
 }
 
 type ReplicaSetNode struct {
 	osgraph.Node
-	ReplicaSet *extensions.ReplicaSet
+	ReplicaSet *kappsv1.ReplicaSet
 
 	IsFound bool
 }
@@ -143,13 +142,13 @@ func (*ReplicaSetNode) Kind() string {
 	return ReplicaSetNodeKind
 }
 
-func ReplicaSetSpecNodeName(o *extensions.ReplicaSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
+func ReplicaSetSpecNodeName(o *kappsv1.ReplicaSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
 	return osgraph.UniqueName(fmt.Sprintf("%s|%v", ReplicaSetSpecNodeKind, ownerName))
 }
 
 type ReplicaSetSpecNode struct {
 	osgraph.Node
-	ReplicaSetSpec *extensions.ReplicaSetSpec
+	ReplicaSetSpec *kappsv1.ReplicaSetSpec
 	Namespace      string
 
 	OwnerName osgraph.UniqueName
@@ -171,13 +170,13 @@ func (*ReplicaSetSpecNode) Kind() string {
 	return ReplicaSetSpecNodeKind
 }
 
-func ReplicationControllerNodeName(o *kapi.ReplicationController) osgraph.UniqueName {
+func ReplicationControllerNodeName(o *corev1.ReplicationController) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ReplicationControllerNodeKind, o)
 }
 
 type ReplicationControllerNode struct {
 	osgraph.Node
-	ReplicationController *kapi.ReplicationController
+	ReplicationController *corev1.ReplicationController
 
 	IsFound bool
 }
@@ -202,13 +201,13 @@ func (*ReplicationControllerNode) Kind() string {
 	return ReplicationControllerNodeKind
 }
 
-func ReplicationControllerSpecNodeName(o *kapi.ReplicationControllerSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
+func ReplicationControllerSpecNodeName(o *corev1.ReplicationControllerSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
 	return osgraph.UniqueName(fmt.Sprintf("%s|%v", ReplicationControllerSpecNodeKind, ownerName))
 }
 
 type ReplicationControllerSpecNode struct {
 	osgraph.Node
-	ReplicationControllerSpec *kapi.ReplicationControllerSpec
+	ReplicationControllerSpec *corev1.ReplicationControllerSpec
 	Namespace                 string
 
 	OwnerName osgraph.UniqueName
@@ -230,13 +229,13 @@ func (*ReplicationControllerSpecNode) Kind() string {
 	return ReplicationControllerSpecNodeKind
 }
 
-func PodTemplateSpecNodeName(o *kapi.PodTemplateSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
+func PodTemplateSpecNodeName(o *corev1.PodTemplateSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
 	return osgraph.UniqueName(fmt.Sprintf("%s|%v", PodTemplateSpecNodeKind, ownerName))
 }
 
 type PodTemplateSpecNode struct {
 	osgraph.Node
-	*kapi.PodTemplateSpec
+	*corev1.PodTemplateSpec
 	Namespace string
 
 	OwnerName osgraph.UniqueName
@@ -258,13 +257,13 @@ func (*PodTemplateSpecNode) Kind() string {
 	return PodTemplateSpecNodeKind
 }
 
-func ServiceAccountNodeName(o *kapi.ServiceAccount) osgraph.UniqueName {
+func ServiceAccountNodeName(o *corev1.ServiceAccount) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(ServiceAccountNodeKind, o)
 }
 
 type ServiceAccountNode struct {
 	osgraph.Node
-	*kapi.ServiceAccount
+	*corev1.ServiceAccount
 
 	IsFound bool
 }
@@ -285,13 +284,13 @@ func (*ServiceAccountNode) Kind() string {
 	return ServiceAccountNodeKind
 }
 
-func SecretNodeName(o *kapi.Secret) osgraph.UniqueName {
+func SecretNodeName(o *corev1.Secret) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(SecretNodeKind, o)
 }
 
 type SecretNode struct {
 	osgraph.Node
-	*kapi.Secret
+	*corev1.Secret
 
 	IsFound bool
 }
@@ -312,13 +311,13 @@ func (*SecretNode) Kind() string {
 	return SecretNodeKind
 }
 
-func PersistentVolumeClaimNodeName(o *kapi.PersistentVolumeClaim) osgraph.UniqueName {
+func PersistentVolumeClaimNodeName(o *corev1.PersistentVolumeClaim) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(PersistentVolumeClaimNodeKind, o)
 }
 
 type PersistentVolumeClaimNode struct {
 	osgraph.Node
-	PersistentVolumeClaim *kapi.PersistentVolumeClaim
+	PersistentVolumeClaim *corev1.PersistentVolumeClaim
 
 	IsFound bool
 }
@@ -343,13 +342,13 @@ func (n PersistentVolumeClaimNode) UniqueName() osgraph.UniqueName {
 	return PersistentVolumeClaimNodeName(n.PersistentVolumeClaim)
 }
 
-func HorizontalPodAutoscalerNodeName(o *autoscaling.HorizontalPodAutoscaler) osgraph.UniqueName {
+func HorizontalPodAutoscalerNodeName(o *autoscalingv1.HorizontalPodAutoscaler) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(HorizontalPodAutoscalerNodeKind, o)
 }
 
 type HorizontalPodAutoscalerNode struct {
 	osgraph.Node
-	HorizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler
+	HorizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler
 }
 
 func (n HorizontalPodAutoscalerNode) Object() interface{} {
@@ -368,13 +367,13 @@ func (n HorizontalPodAutoscalerNode) UniqueName() osgraph.UniqueName {
 	return HorizontalPodAutoscalerNodeName(n.HorizontalPodAutoscaler)
 }
 
-func DeploymentNodeName(o *extensions.Deployment) osgraph.UniqueName {
+func DeploymentNodeName(o *kappsv1.Deployment) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(DeploymentNodeKind, o)
 }
 
 type DeploymentNode struct {
 	osgraph.Node
-	Deployment *extensions.Deployment
+	Deployment *kappsv1.Deployment
 
 	IsFound bool
 }
@@ -399,13 +398,13 @@ func (*DeploymentNode) Kind() string {
 	return DeploymentNodeKind
 }
 
-func DeploymentSpecNodeName(o *extensions.DeploymentSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
+func DeploymentSpecNodeName(o *kappsv1.DeploymentSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
 	return osgraph.UniqueName(fmt.Sprintf("%s|%v", DeploymentSpecNodeKind, ownerName))
 }
 
 type DeploymentSpecNode struct {
 	osgraph.Node
-	DeploymentSpec *extensions.DeploymentSpec
+	DeploymentSpec *kappsv1.DeploymentSpec
 	Namespace      string
 
 	OwnerName osgraph.UniqueName
@@ -427,13 +426,13 @@ func (*DeploymentSpecNode) Kind() string {
 	return DeploymentSpecNodeKind
 }
 
-func StatefulSetNodeName(o *kapps.StatefulSet) osgraph.UniqueName {
+func StatefulSetNodeName(o *kappsv1.StatefulSet) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(StatefulSetNodeKind, o)
 }
 
 type StatefulSetNode struct {
 	osgraph.Node
-	StatefulSet *kapps.StatefulSet
+	StatefulSet *kappsv1.StatefulSet
 
 	IsFound bool
 }
@@ -458,13 +457,13 @@ func (*StatefulSetNode) Kind() string {
 	return StatefulSetNodeKind
 }
 
-func StatefulSetSpecNodeName(o *kapps.StatefulSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
+func StatefulSetSpecNodeName(o *kappsv1.StatefulSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
 	return osgraph.UniqueName(fmt.Sprintf("%s|%v", StatefulSetSpecNodeKind, ownerName))
 }
 
 type StatefulSetSpecNode struct {
 	osgraph.Node
-	StatefulSetSpec *kapps.StatefulSetSpec
+	StatefulSetSpec *kappsv1.StatefulSetSpec
 	Namespace       string
 
 	OwnerName osgraph.UniqueName
@@ -486,13 +485,13 @@ func (*StatefulSetSpecNode) Kind() string {
 	return StatefulSetSpecNodeKind
 }
 
-func DaemonSetNodeName(o *extensions.DaemonSet) osgraph.UniqueName {
+func DaemonSetNodeName(o *kappsv1.DaemonSet) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(DaemonSetNodeKind, o)
 }
 
 type DaemonSetNode struct {
 	osgraph.Node
-	DaemonSet *extensions.DaemonSet
+	DaemonSet *kappsv1.DaemonSet
 
 	IsFound bool
 }
diff --git a/pkg/oc/lib/graph/routegraph/analysis/analysis.go b/pkg/oc/lib/graph/routegraph/analysis/analysis.go
index ea4b4392a67f..b57ac1310bb9 100644
--- a/pkg/oc/lib/graph/routegraph/analysis/analysis.go
+++ b/pkg/oc/lib/graph/routegraph/analysis/analysis.go
@@ -6,14 +6,14 @@ import (
 
 	"github.com/gonum/graph"
 
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 
+	routev1 "github.com/openshift/api/route/v1"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
 	routeedges "github.com/openshift/origin/pkg/oc/lib/graph/routegraph"
 	routegraph "github.com/openshift/origin/pkg/oc/lib/graph/routegraph/nodes"
 	"github.com/openshift/origin/pkg/oc/lib/routedisplayhelpers"
-	routeapi "github.com/openshift/origin/pkg/route/apis/route"
 )
 
 const (
@@ -168,8 +168,8 @@ func FindRouteAdmissionFailures(g osgraph.Graph, f osgraph.Namer) []osgraph.Mark
 		routeNode := uncastRouteNode.(*routegraph.RouteNode)
 	Route:
 		for _, ingress := range routeNode.Status.Ingress {
-			switch status, condition := routedisplayhelpers.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status {
-			case kapi.ConditionFalse:
+			switch status, condition := routedisplayhelpers.IngressConditionStatus(&ingress, routev1.RouteAdmitted); status {
+			case corev1.ConditionFalse:
 				markers = append(markers, osgraph.Marker{
 					Node: routeNode,
 
@@ -213,7 +213,7 @@ func FindPathBasedPassthroughRoutes(g osgraph.Graph, f osgraph.Namer) []osgraph.
 	for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) {
 		routeNode := uncastRouteNode.(*routegraph.RouteNode)
 
-		if len(routeNode.Spec.Path) > 0 && routeNode.Spec.TLS != nil && routeNode.Spec.TLS.Termination == routeapi.TLSTerminationPassthrough {
+		if len(routeNode.Spec.Path) > 0 && routeNode.Spec.TLS != nil && routeNode.Spec.TLS.Termination == routev1.TLSTerminationPassthrough {
 			markers = append(markers, osgraph.Marker{
 				Node: routeNode,
 
diff --git a/pkg/oc/lib/graph/routegraph/edges.go b/pkg/oc/lib/graph/routegraph/edges.go
index f0ac8d4de6d5..9ffb6c267dff 100644
--- a/pkg/oc/lib/graph/routegraph/edges.go
+++ b/pkg/oc/lib/graph/routegraph/edges.go
@@ -3,7 +3,7 @@ package routegraph
 import (
 	"github.com/gonum/graph"
 
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
 	kubegraph "github.com/openshift/origin/pkg/oc/lib/graph/kubegraph/nodes"
@@ -18,7 +18,7 @@ const (
 
 // AddRouteEdges adds an edge that connect a service to a route in the given graph
 func AddRouteEdges(g osgraph.MutableUniqueGraph, node *routegraph.RouteNode) {
-	syntheticService := &kapi.Service{}
+	syntheticService := &corev1.Service{}
 	syntheticService.Namespace = node.Namespace
 	syntheticService.Name = node.Spec.To.Name
 
@@ -26,7 +26,7 @@ func AddRouteEdges(g osgraph.MutableUniqueGraph, node *routegraph.RouteNode) {
 	g.AddEdge(node, serviceNode, ExposedThroughRouteEdgeKind)
 
 	for _, svc := range node.Spec.AlternateBackends {
-		syntheticService := &kapi.Service{}
+		syntheticService := &corev1.Service{}
 		syntheticService.Namespace = node.Namespace
 		syntheticService.Name = svc.Name
 
diff --git a/pkg/oc/lib/graph/routegraph/nodes/nodes.go b/pkg/oc/lib/graph/routegraph/nodes/nodes.go
index 64a48c3f1f9d..546f75950e8f 100644
--- a/pkg/oc/lib/graph/routegraph/nodes/nodes.go
+++ b/pkg/oc/lib/graph/routegraph/nodes/nodes.go
@@ -3,12 +3,12 @@ package nodes
 import (
 	"github.com/gonum/graph"
 
+	routev1 "github.com/openshift/api/route/v1"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	routeapi "github.com/openshift/origin/pkg/route/apis/route"
 )
 
 // EnsureRouteNode adds a graph node for the specific route if it does not exist
-func EnsureRouteNode(g osgraph.MutableUniqueGraph, route *routeapi.Route) *RouteNode {
+func EnsureRouteNode(g osgraph.MutableUniqueGraph, route *routev1.Route) *RouteNode {
 	return osgraph.EnsureUnique(
 		g,
 		RouteNodeName(route),
diff --git a/pkg/oc/lib/graph/routegraph/nodes/types.go b/pkg/oc/lib/graph/routegraph/nodes/types.go
index 7d59ec14bcb4..2022e6e3688f 100644
--- a/pkg/oc/lib/graph/routegraph/nodes/types.go
+++ b/pkg/oc/lib/graph/routegraph/nodes/types.go
@@ -3,21 +3,21 @@ package nodes
 import (
 	"reflect"
 
+	routev1 "github.com/openshift/api/route/v1"
 	osgraph "github.com/openshift/origin/pkg/oc/lib/graph/genericgraph"
-	routeapi "github.com/openshift/origin/pkg/route/apis/route"
 )
 
 var (
-	RouteNodeKind = reflect.TypeOf(routeapi.Route{}).Name()
+	RouteNodeKind = reflect.TypeOf(routev1.Route{}).Name()
 )
 
-func RouteNodeName(o *routeapi.Route) osgraph.UniqueName {
+func RouteNodeName(o *routev1.Route) osgraph.UniqueName {
 	return osgraph.GetUniqueRuntimeObjectNodeName(RouteNodeKind, o)
 }
 
 type RouteNode struct {
 	osgraph.Node
-	*routeapi.Route
+	*routev1.Route
 }
 
 func (n RouteNode) Object() interface{} {
diff --git a/pkg/oc/lib/routedisplayhelpers/status.go b/pkg/oc/lib/routedisplayhelpers/status.go
index a0f14f8121f9..b40027ce86b1 100644
--- a/pkg/oc/lib/routedisplayhelpers/status.go
+++ b/pkg/oc/lib/routedisplayhelpers/status.go
@@ -1,17 +1,17 @@
 package routedisplayhelpers
 
 import (
-	kapi "k8s.io/kubernetes/pkg/apis/core"
+	corev1 "k8s.io/api/core/v1"
 
-	routeapi "github.com/openshift/origin/pkg/route/apis/route"
+	routev1 "github.com/openshift/api/route/v1"
 )
 
-func IngressConditionStatus(ingress *routeapi.RouteIngress, t routeapi.RouteIngressConditionType) (kapi.ConditionStatus, routeapi.RouteIngressCondition) {
+func IngressConditionStatus(ingress *routev1.RouteIngress, t routev1.RouteIngressConditionType) (corev1.ConditionStatus, routev1.RouteIngressCondition) {
 	for _, condition := range ingress.Conditions {
 		if t != condition.Type {
 			continue
 		}
 		return condition.Status, condition
 	}
-	return kapi.ConditionUnknown, routeapi.RouteIngressCondition{}
+	return corev1.ConditionUnknown, routev1.RouteIngressCondition{}
 }
diff --git a/test/extended/router/scoped.go b/test/extended/router/scoped.go
index 0db61a8a1781..7088c6fb4a6c 100644
--- a/test/extended/router/scoped.go
+++ b/test/extended/router/scoped.go
@@ -11,14 +11,16 @@ import (
 	g "github.com/onsi/ginkgo"
 	o "github.com/onsi/gomega"
 
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
-	kapi "k8s.io/kubernetes/pkg/apis/core"
 	e2e "k8s.io/kubernetes/test/e2e/framework"
 
+	routev1 "github.com/openshift/api/route/v1"
 	routeclientset "github.com/openshift/client-go/route/clientset/versioned"
 	"github.com/openshift/origin/pkg/oc/lib/routedisplayhelpers"
 	routeapi "github.com/openshift/origin/pkg/route/apis/route"
+	routev1conversions "github.com/openshift/origin/pkg/route/apis/route/v1"
 	exutil "github.com/openshift/origin/test/extended/util"
 )
 
@@ -162,8 +164,11 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 			e2e.Logf("Selected: %#v, All: %#v", ingress, r.Status.Ingress)
 			o.Expect(ingress).NotTo(o.BeNil())
 			o.Expect(ingress.Host).To(o.Equal(fmt.Sprintf(pattern, "route-1", ns)))
-			status, condition := routedisplayhelpers.IngressConditionStatus(ingress, routeapi.RouteAdmitted)
-			o.Expect(status).To(o.Equal(kapi.ConditionTrue))
+			external := routev1.RouteIngress{}
+			err = routev1conversions.Convert_route_RouteIngress_To_v1_RouteIngress(ingress, &external, nil)
+			o.Expect(err).NotTo(o.HaveOccurred())
+			status, condition := routedisplayhelpers.IngressConditionStatus(&external, routev1.RouteAdmitted)
+			o.Expect(status).To(o.Equal(corev1.ConditionTrue))
 			o.Expect(condition.LastTransitionTime).NotTo(o.BeNil())
 		})
 
@@ -221,8 +226,11 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 			ingress := ingressForName(r, "test-override-domains")
 			o.Expect(ingress).NotTo(o.BeNil())
 			o.Expect(ingress.Host).To(o.Equal(fmt.Sprintf(pattern, "route-override-domain-2", ns)))
-			status, condition := routedisplayhelpers.IngressConditionStatus(ingress, routeapi.RouteAdmitted)
-			o.Expect(status).To(o.Equal(kapi.ConditionTrue))
+			external := routev1.RouteIngress{}
+			err = routev1conversions.Convert_route_RouteIngress_To_v1_RouteIngress(ingress, &external, nil)
+			o.Expect(err).NotTo(o.HaveOccurred())
+			status, condition := routedisplayhelpers.IngressConditionStatus(&external, routev1.RouteAdmitted)
+			o.Expect(status).To(o.Equal(corev1.ConditionTrue))
 			o.Expect(condition.LastTransitionTime).NotTo(o.BeNil())
 		})
 	})
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
index a435e5eaceff..815bed11aec1 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
@@ -899,11 +899,11 @@ func WaitForObservedDeploymentInternal(getDeploymentFunc func() (*internalextens
 // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
 // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
 func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
-	surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true)
+	surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
 	if err != nil {
 		return 0, 0, err
 	}
-	unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false)
+	unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
 	if err != nil {
 		return 0, 0, err
 	}
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go
index 1d90e848d0d0..d772ca50ee4f 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go
@@ -617,52 +617,83 @@ func TestGetReplicaCountForReplicaSets(t *testing.T) {
 
 func TestResolveFenceposts(t *testing.T) {
 	tests := []struct {
-		maxSurge          string
-		maxUnavailable    string
+		maxSurge          *string
+		maxUnavailable    *string
 		desired           int32
 		expectSurge       int32
 		expectUnavailable int32
 		expectError       bool
 	}{
 		{
-			maxSurge:          "0%",
-			maxUnavailable:    "0%",
+			maxSurge:          newString("0%"),
+			maxUnavailable:    newString("0%"),
 			desired:           0,
 			expectSurge:       0,
 			expectUnavailable: 1,
 			expectError:       false,
 		},
 		{
-			maxSurge:          "39%",
-			maxUnavailable:    "39%",
+			maxSurge:          newString("39%"),
+			maxUnavailable:    newString("39%"),
 			desired:           10,
 			expectSurge:       4,
 			expectUnavailable: 3,
 			expectError:       false,
 		},
 		{
-			maxSurge:          "oops",
-			maxUnavailable:    "39%",
+			maxSurge:          newString("oops"),
+			maxUnavailable:    newString("39%"),
 			desired:           10,
 			expectSurge:       0,
 			expectUnavailable: 0,
 			expectError:       true,
 		},
 		{
-			maxSurge:          "55%",
-			maxUnavailable:    "urg",
+			maxSurge:          newString("55%"),
+			maxUnavailable:    newString("urg"),
 			desired:           10,
 			expectSurge:       0,
 			expectUnavailable: 0,
 			expectError:       true,
 		},
+		{
+			maxSurge:          nil,
+			maxUnavailable:    newString("39%"),
+			desired:           10,
+			expectSurge:       0,
+			expectUnavailable: 3,
+			expectError:       false,
+		},
+		{
+			maxSurge:          newString("39%"),
+			maxUnavailable:    nil,
+			desired:           10,
+			expectSurge:       4,
+			expectUnavailable: 0,
+			expectError:       false,
+		},
+		{
+			maxSurge:          nil,
+			maxUnavailable:    nil,
+			desired:           10,
+			expectSurge:       0,
+			expectUnavailable: 1,
+			expectError:       false,
+		},
 	}
 
 	for num, test := range tests {
-		t.Run("maxSurge="+test.maxSurge, func(t *testing.T) {
-			maxSurge := intstr.FromString(test.maxSurge)
-			maxUnavail := intstr.FromString(test.maxUnavailable)
-			surge, unavail, err := ResolveFenceposts(&maxSurge, &maxUnavail, test.desired)
+		t.Run(fmt.Sprintf("%d", num), func(t *testing.T) {
+			var maxSurge, maxUnavail *intstr.IntOrString
+			if test.maxSurge != nil {
+				surge := intstr.FromString(*test.maxSurge)
+				maxSurge = &surge
+			}
+			if test.maxUnavailable != nil {
+				unavail := intstr.FromString(*test.maxUnavailable)
+				maxUnavail = &unavail
+			}
+			surge, unavail, err := ResolveFenceposts(maxSurge, maxUnavail, test.desired)
 			if err != nil && !test.expectError {
 				t.Errorf("unexpected error %v", err)
 			}
@@ -676,6 +707,10 @@ func TestResolveFenceposts(t *testing.T) {
 	}
 }
 
+func newString(s string) *string {
+	return &s
+}
+
 func TestNewRSNewReplicas(t *testing.T) {
 	tests := []struct {
 		Name          string
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index 231498ca0324..642b83cec217 100644
--- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -18,6 +18,7 @@ package intstr
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"math"
 	"runtime/debug"
@@ -142,7 +143,17 @@ func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
 	}
 }
 
+func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString {
+	if intOrPercent == nil {
+		return &defaultValue
+	}
+	return intOrPercent
+}
+
 func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
+	if intOrPercent == nil {
+		return 0, errors.New("nil value for IntOrString")
+	}
 	value, isPercent, err := getIntOrPercentValue(intOrPercent)
 	if err != nil {
 		return 0, fmt.Errorf("invalid value for IntOrString: %v", err)
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr_test.go
index 4faba46f8d09..690fe2d5331b 100644
--- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr_test.go
+++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr_test.go
@@ -174,3 +174,10 @@ func TestGetValueFromIntOrPercent(t *testing.T) {
 		}
 	}
 }
+
+func TestGetValueFromIntOrPercentNil(t *testing.T) {
+	_, err := GetValueFromIntOrPercent(nil, 0, false)
+	if err == nil {
+		t.Errorf("expected error got none")
+	}
+}