Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add extended tests for DC ControllerRef #14880

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
108 changes: 108 additions & 0 deletions test/extended/deployments/deployments.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,10 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
kapiv1 "k8s.io/kubernetes/pkg/api/v1"
kcontroller "k8s.io/kubernetes/pkg/controller"
e2e "k8s.io/kubernetes/test/e2e/framework"

"github.com/openshift/origin/pkg/client"
Expand All @@ -24,6 +26,7 @@ import (
)

const deploymentRunTimeout = 5 * time.Minute
const deploymentChangeTimeout = 30 * time.Second

var _ = g.Describe("deploymentconfigs", func() {
defer g.GinkgoRecover()
Expand Down Expand Up @@ -968,4 +971,109 @@ var _ = g.Describe("deploymentconfigs", func() {
o.Expect(err).NotTo(o.HaveOccurred())
})
})

g.Describe("", func() {
dcName := "deployment-simple"
g.AfterEach(func() {
failureTrap(oc, dcName, g.CurrentGinkgoTestDescription().Failed)
failureTrapForDetachedRCs(oc, dcName, g.CurrentGinkgoTestDescription().Failed)
})

g.It("should adhere to Three Laws of Controllers [Conformance]", func() {
namespace := oc.Namespace()
rcName := func(i int) string { return fmt.Sprintf("%s-%d", dcName, i) }

var dc *deployapi.DeploymentConfig
var rc1 *kapiv1.ReplicationController
var err error

g.By("should create ControllerRef in RCs it creates", func() {
dc, err = readDCFixture(simpleDeploymentFixture)
o.Expect(err).NotTo(o.HaveOccurred())
dc, err = oc.Client().DeploymentConfigs(namespace).Create(dc)
o.Expect(err).NotTo(o.HaveOccurred())

err = waitForLatestCondition(oc, dcName, deploymentRunTimeout, deploymentRunning)
o.Expect(err).NotTo(o.HaveOccurred())

rc1, err = oc.KubeClient().CoreV1().ReplicationControllers(namespace).Get(rcName(1), metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())
validRef := HasValidDCControllerRef(dc, rc1)
o.Expect(validRef).To(o.BeTrue())
})

err = waitForLatestCondition(oc, dcName, deploymentRunTimeout, deploymentReachedCompletion)
o.Expect(err).NotTo(o.HaveOccurred())

g.By("releasing RCs that no longer match its selector", func() {
dc, err = oc.Client().DeploymentConfigs(namespace).Get(dcName, metav1.GetOptions{})
o.Expect(err).NotTo(o.HaveOccurred())

patch := []byte(fmt.Sprintf(`{"metadata": {"labels":{"openshift.io/deployment-config.name": "%s-detached"}}}`, dcName))
rc1, err = oc.KubeClient().CoreV1().ReplicationControllers(namespace).Patch(rcName(1), types.StrategicMergePatchType, patch)
o.Expect(err).NotTo(o.HaveOccurred())

rc1, err = waitForRCModification(oc, namespace, rcName(1), deploymentChangeTimeout,
rc1.GetResourceVersion(), rCConditionFromMeta(controllerRefChangeCondition(kcontroller.GetControllerOf(rc1))))
o.Expect(err).NotTo(o.HaveOccurred())
controllerRef := kcontroller.GetControllerOf(rc1)
o.Expect(controllerRef).To(o.BeNil())

dc, err = waitForDCModification(oc, namespace, dcName, deploymentChangeTimeout,
dc.GetResourceVersion(), func(config *deployapi.DeploymentConfig) (bool, error) {
return config.Status.AvailableReplicas != dc.Status.AvailableReplicas, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(dc.Status.AvailableReplicas).To(o.BeZero())
o.Expect(dc.Status.UnavailableReplicas).To(o.BeZero())
})

g.By("adopting RCs that match its selector and have no ControllerRef", func() {
patch := []byte(fmt.Sprintf(`{"metadata": {"labels":{"openshift.io/deployment-config.name": "%s"}}}`, dcName))
rc1, err = oc.KubeClient().CoreV1().ReplicationControllers(namespace).Patch(rcName(1), types.StrategicMergePatchType, patch)
o.Expect(err).NotTo(o.HaveOccurred())

rc1, err = waitForRCModification(oc, namespace, rcName(1), deploymentChangeTimeout,
rc1.GetResourceVersion(), rCConditionFromMeta(controllerRefChangeCondition(kcontroller.GetControllerOf(rc1))))
o.Expect(err).NotTo(o.HaveOccurred())
validRef := HasValidDCControllerRef(dc, rc1)
o.Expect(validRef).To(o.BeTrue())

dc, err = waitForDCModification(oc, namespace, dcName, deploymentChangeTimeout,
dc.GetResourceVersion(), func(config *deployapi.DeploymentConfig) (bool, error) {
return config.Status.AvailableReplicas != dc.Status.AvailableReplicas, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(dc.Status.AvailableReplicas).To(o.Equal(dc.Spec.Replicas))
o.Expect(dc.Status.UnavailableReplicas).To(o.BeZero())
})

g.By("deleting owned RCs when deleted", func() {
// FIXME: Add delete option when we have new client available.
// This is working fine now because of finalizers on RCs but when GC gets fixed
// and we remove them this will probably break and will require setting deleteOptions
// to achieve cascade delete
err = oc.Client().DeploymentConfigs(namespace).Delete(dcName)
o.Expect(err).NotTo(o.HaveOccurred())

err = wait.PollImmediate(200*time.Millisecond, 5*time.Minute, func() (bool, error) {
pods, err := oc.KubeClient().CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}
return len(pods.Items) == 0, nil
})
o.Expect(err).NotTo(o.HaveOccurred())

err = wait.PollImmediate(200*time.Millisecond, 30*time.Second, func() (bool, error) {
rcs, err := oc.KubeClient().CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}
return len(rcs.Items) == 0, nil
})
o.Expect(err).NotTo(o.HaveOccurred())
})
})
})
})
126 changes: 126 additions & 0 deletions test/extended/deployments/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,30 @@ package deployments

import (
"fmt"
"io/ioutil"
"reflect"
"sort"
"strings"
"time"

"github.com/ghodss/yaml"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
kapi "k8s.io/kubernetes/pkg/api"
kapiv1 "k8s.io/kubernetes/pkg/api/v1"
kcontroller "k8s.io/kubernetes/pkg/controller"
e2e "k8s.io/kubernetes/test/e2e/framework"

deployapi "github.com/openshift/origin/pkg/deploy/apis/apps"
deployapiv1 "github.com/openshift/origin/pkg/deploy/apis/apps/v1"
deployutil "github.com/openshift/origin/pkg/deploy/util"
exutil "github.com/openshift/origin/test/extended/util"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
)

func deploymentStatuses(rcs []kapi.ReplicationController) []string {
Expand Down Expand Up @@ -385,6 +393,67 @@ func waitForDeployerToComplete(oc *exutil.CLI, name string, timeout time.Duratio
return output, nil
}

// isControllerRefChange reports whether the controllee's current controllerRef
// differs from old. It returns an error when old is a non-nil ownerReference
// whose Controller flag is explicitly false, since such a reference is not a
// controllerRef and the comparison would be meaningless.
func isControllerRefChange(controllee metav1.Object, old *metav1.OwnerReference) (bool, error) {
	// Idiomatic negation instead of comparing against the literal `false`.
	if old != nil && old.Controller != nil && !*old.Controller {
		return false, fmt.Errorf("old ownerReference is not a controllerRef")
	}
	return !reflect.DeepEqual(old, kcontroller.GetControllerOf(controllee)), nil
}

// controllerRefChangeCondition builds a condition function that is satisfied
// once the observed object's controllerRef no longer equals old.
func controllerRefChangeCondition(old *metav1.OwnerReference) func(controllee metav1.Object) (bool, error) {
	// Capture old; each invocation re-reads the object's current ref.
	cond := func(obj metav1.Object) (bool, error) {
		return isControllerRefChange(obj, old)
	}
	return cond
}

// rCConditionFromMeta adapts a generic metav1.Object condition into an
// RC-typed condition usable with waitForRCModification.
func rCConditionFromMeta(condition func(metav1.Object) (bool, error)) func(rc *kapiv1.ReplicationController) (bool, error) {
	adapted := func(rc *kapiv1.ReplicationController) (bool, error) {
		// *ReplicationController satisfies metav1.Object, so delegate directly.
		return condition(rc)
	}
	return adapted
}

// waitForRCModification watches the named RC starting at resourceVersion and
// blocks until condition is satisfied by a Modified event. Any other event
// type, an unexpected object type, or the timeout aborts the wait with an
// error. Returns the RC from the satisfying event.
func waitForRCModification(oc *exutil.CLI, namespace string, name string, timeout time.Duration, resourceVersion string, condition func(rc *kapiv1.ReplicationController) (bool, error)) (*kapiv1.ReplicationController, error) {
	watcher, err := oc.KubeClient().CoreV1().ReplicationControllers(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: name, ResourceVersion: resourceVersion}))
	if err != nil {
		return nil, err
	}

	event, err := watch.Until(timeout, watcher, func(event watch.Event) (bool, error) {
		if event.Type != watch.Modified {
			// Added/Deleted/Error here means the object changed in an
			// unexpected way; fail fast instead of waiting for the timeout.
			return false, fmt.Errorf("different kind of event appeared while waiting for modification: event: %#v", event)
		}
		// Checked assertion: a watch can deliver e.g. *metav1.Status; a bare
		// assertion would panic the test instead of reporting a clean error.
		rc, ok := event.Object.(*kapiv1.ReplicationController)
		if !ok {
			return false, fmt.Errorf("unexpected object type %T while waiting for RC modification", event.Object)
		}
		return condition(rc)
	})
	if err != nil {
		return nil, err
	}
	if event.Type != watch.Modified {
		return nil, fmt.Errorf("waiting for RC modification failed: event: %v", event)
	}
	rc, ok := event.Object.(*kapiv1.ReplicationController)
	if !ok {
		return nil, fmt.Errorf("unexpected object type %T while waiting for RC modification", event.Object)
	}
	return rc, nil
}

// waitForDCModification watches the named DeploymentConfig starting at
// resourceVersion and blocks until condition is satisfied by a Modified
// event. Any other event type, an unexpected object type, or the timeout
// aborts the wait with an error. Returns the DC from the satisfying event.
func waitForDCModification(oc *exutil.CLI, namespace string, name string, timeout time.Duration, condition func(dc *deployapi.DeploymentConfig) (bool, error)) (*deployapi.DeploymentConfig, error) {
	watcher, err := oc.Client().DeploymentConfigs(namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: name, ResourceVersion: resourceVersion}))
	if err != nil {
		return nil, err
	}

	event, err := watch.Until(timeout, watcher, func(event watch.Event) (bool, error) {
		if event.Type != watch.Modified {
			// Added/Deleted/Error here means the object changed in an
			// unexpected way; fail fast instead of waiting for the timeout.
			return false, fmt.Errorf("different kind of event appeared while waiting for modification: event: %#v", event)
		}
		// Checked assertion avoids a panic on an unexpected watch payload.
		dc, ok := event.Object.(*deployapi.DeploymentConfig)
		if !ok {
			return false, fmt.Errorf("unexpected object type %T while waiting for DC modification", event.Object)
		}
		return condition(dc)
	})
	if err != nil {
		return nil, err
	}
	if event.Type != watch.Modified {
		return nil, fmt.Errorf("waiting for DC modification failed: event: %v", event)
	}
	dc, ok := event.Object.(*deployapi.DeploymentConfig)
	if !ok {
		return nil, fmt.Errorf("unexpected object type %T while waiting for DC modification", event.Object)
	}
	return dc, nil
}

// createFixture will create the provided fixture and return the resource and the
// name separately.
// TODO: Probably move to a more general location like test/extended/util/cli.go
Expand Down Expand Up @@ -437,3 +506,60 @@ func failureTrap(oc *exutil.CLI, name string, failed bool) {
}
}
}

// failureTrapForDetachedRCs logs any "detached" RCs (RCs whose
// deployment-config.name label no longer matches dcName) on test failure,
// dumping their full YAML to the e2e log to aid debugging. No-op on success.
func failureTrapForDetachedRCs(oc *exutil.CLI, dcName string, failed bool) {
	if !failed {
		return
	}
	kclient := oc.KubeClient()
	// Detached RCs are selected by label != dcName.
	requirement, err := labels.NewRequirement(deployapi.DeploymentConfigAnnotation, selection.NotEquals, []string{dcName})
	if err != nil {
		e2e.Logf("failed to create requirement for DC %q", dcName)
		return
	}
	// Build the selector string once and use it for both the API list and
	// the `oc get` dump below, so the two cannot drift apart.
	selector := labels.NewSelector().Add(*requirement).String()
	rcList, err := kclient.CoreV1().ReplicationControllers(oc.Namespace()).List(metav1.ListOptions{
		LabelSelector: selector,
	})
	if err != nil {
		e2e.Logf("Error getting detached RCs; DC %q: %v", dcName, err)
		return
	}
	if len(rcList.Items) == 0 {
		e2e.Logf("No detached RCs found.")
		return
	}
	out, err := oc.Run("get").Args("rc", "-o", "yaml", "-l", selector).Output()
	if err != nil {
		e2e.Logf("Failed to list detached RCs!")
		return
	}
	e2e.Logf("There are detached RCs: \n%s", out)
}

// HasValidDCControllerRef returns true if the controllee carries a
// controllerRef that points at the given DC — matching UID, API
// group/version, kind, and name — and false otherwise (including when no
// controllerRef is set at all).
func HasValidDCControllerRef(dc metav1.Object, controllee metav1.Object) bool {
	ref := kcontroller.GetControllerOf(controllee)
	if ref == nil {
		return false
	}
	expectedAPIVersion := deployutil.DeploymentConfigControllerRefKind.GroupVersion().String()
	return ref.UID == dc.GetUID() &&
		ref.APIVersion == expectedAPIVersion &&
		ref.Kind == deployutil.DeploymentConfigControllerRefKind.Kind &&
		ref.Name == dc.GetName()
}

// readDCFixture loads a v1 DeploymentConfig from the YAML fixture at path
// and converts it to the internal apps representation used by the tests.
func readDCFixture(path string) (*deployapi.DeploymentConfig, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}

	// Decode into the external (v1) type first; fixtures are written
	// against the versioned API.
	external := &deployapiv1.DeploymentConfig{}
	if err := yaml.Unmarshal(data, external); err != nil {
		return nil, err
	}

	internal := &deployapi.DeploymentConfig{}
	if err := deployapiv1.Convert_v1_DeploymentConfig_To_apps_DeploymentConfig(external, internal, nil); err != nil {
		return nil, err
	}
	return internal, nil
}