Commit ca9f2e6

Merge pull request oracle#199 in OKE/oci-cloud-controller-manager from task/OKE-18125 to internal
* commit 'e1cb4074cbf30e2d895207abba29e4c95db799a0':
  Added missing break statement
  Reverted pod start timeout change
  Changed test to make deployment use a single node to spawn pods
  Changed test to exec into restarted pod to check data
  JIRA:task/OKE-18125 added test to check data integrity on pod restart
2 parents: db06fbd + e1cb407

File tree

4 files changed (+251, -0)


test/e2e/cloud-provider-oci/csi_volume_creation.go (+9)

@@ -58,6 +58,15 @@ var _ = Describe("CSI Volume Creation", func() {
 
 			pvcJig.CheckVolumeCapacity("100Gi", pvc.Name, f.Namespace.Name)
 		})
+
+		It("Data should persist on CSI volume on pod restart", func() {
+			pvcJig := framework.NewPVCTestJig(f.ClientSet, "csi-pod-restart-data-persistence")
+
+			scName := f.CreateStorageClassOrFail(framework.ClassOCICSI, "blockvolume.csi.oraclecloud.com", nil, pvcJig.Labels, "WaitForFirstConsumer")
+			pvc := pvcJig.CreateAndAwaitPVCOrFailCSI(f.Namespace.Name, framework.MinVolumeBlock, scName, nil)
+
+			pvcJig.CheckDataPersistenceWithDeployment(pvc.Name, f.Namespace.Name)
+		})
 	})
 })
test/e2e/framework/deployment_util.go (new file, +92)

package framework

import (
    "time"

    appsv1 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/utils/pointer"
)

// createDeploymentOnNodeAndWait creates a single-container deployment that
// runs the given shell command, mounts the given PVC at /data, and schedules
// its pods via the given node selector. It fails the test if the deployment
// does not become available in time, and returns the deployment name.
func (j *PVCTestJig) createDeploymentOnNodeAndWait(command string, pvcName string, ns string, name string, replicas int32, nodeSelectorLabels map[string]string) string {
    deployment, err := j.KubeClient.AppsV1().Deployments(ns).Create(&appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: pointer.Int32Ptr(replicas),
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "app": name,
                },
            },
            Template: v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: map[string]string{
                        "app": name,
                    },
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{
                        {
                            Name:    name,
                            Image:   centos,
                            Command: []string{"/bin/sh"},
                            Args:    []string{"-c", command},
                            VolumeMounts: []v1.VolumeMount{
                                {
                                    Name:      "persistent-storage",
                                    MountPath: "/data",
                                },
                            },
                        },
                    },
                    Volumes: []v1.Volume{
                        {
                            Name: "persistent-storage",
                            VolumeSource: v1.VolumeSource{
                                PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                                    ClaimName: pvcName,
                                },
                            },
                        },
                    },
                    NodeSelector: nodeSelectorLabels,
                },
            },
        },
    })
    if err != nil {
        Failf("Error creating deployment %v: %v", name, err)
    }

    // Wait for the deployment to become available.
    Logf("Waiting up to %v for deployment %v to be completed", deploymentAvailableTimeout, deployment.Name)
    err = j.waitTimeoutForDeploymentAvailable(deployment.Name, ns, deploymentAvailableTimeout, replicas)
    if err != nil {
        Failf("Deployment %q did not complete: %v", deployment.Name, err)
    }

    return deployment.Name
}

// waitTimeoutForDeploymentAvailable waits up to the given timeout
// (deploymentAvailableTimeout by default) for the specified deployment to
// report the expected number of available replicas.
// Returns an error if the timeout occurs first.
func (j *PVCTestJig) waitTimeoutForDeploymentAvailable(deploymentName string, namespace string, timeout time.Duration, replicas int32) error {
    return wait.PollImmediate(Poll, timeout, j.deploymentAvailable(deploymentName, namespace, replicas))
}

// deploymentAvailable returns a wait.ConditionFunc that reports whether the
// deployment has the expected number of available replicas.
func (j *PVCTestJig) deploymentAvailable(deploymentName string, namespace string, replicas int32) wait.ConditionFunc {
    return func() (bool, error) {
        deployment, err := j.KubeClient.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        if deployment.Status.AvailableReplicas == replicas {
            return true, nil
        }
        return false, nil
    }
}
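For orientation, a minimal sketch of how this helper might be called from a test body; the PVC name, namespace, and node label here are hypothetical, not part of this commit:

// Sketch only: assumes it runs inside package framework, where PVCTestJig
// and Logf are defined.
func exampleCreateDeployment(j *PVCTestJig) {
    // Hypothetical node selector; the data-persistence test below passes the
    // labels of the first schedulable node instead.
    nodeSelector := map[string]string{"kubernetes.io/hostname": "node-1"}

    name := j.createDeploymentOnNodeAndWait(
        "while true; do sleep 1; done;", // keep the container alive
        "my-pvc",  // hypothetical pre-created PVC, mounted at /data
        "default", // namespace
        "demo-deployment",
        1, // replicas
        nodeSelector,
    )
    Logf("Deployment %q is available", name)
}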

test/e2e/framework/framework.go (+2)

@@ -32,6 +32,8 @@ const (
 	// Some pods can take much longer to get ready due to volume attach/detach latency.
 	slowPodStartTimeout = 15 * time.Minute
 
+	deploymentAvailableTimeout = 5 * time.Minute
+
 	DefaultClusterKubeconfig = "/tmp/clusterkubeconfig"
 	DefaultCloudConfig = "/tmp/cloudconfig"

test/e2e/framework/pvc_util.go (+148)

@@ -15,7 +15,15 @@
 package framework
 
 import (
+	"bytes"
 	"fmt"
+	"io"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/remotecommand"
+	"os"
+	"path/filepath"
 	"strings"
 	"time"

@@ -873,3 +881,143 @@ func (j *PVCTestJig) CheckMultiplePodReadWrite(namespace string, pvcName string,
 	By("Creating Pod that can read contents of existing file")
 	j.NewPodForCSIFSSRead(string(uuid2), namespace, pvcName, fileName)
 }

The hunk appends the following two functions:

// CheckDataPersistenceWithDeployment writes data to the given PVC through a
// single-replica deployment, deletes the pod, and verifies the data is still
// present when the replacement pod reads the volume.
func (j *PVCTestJig) CheckDataPersistenceWithDeployment(pvcName string, ns string) {
    nodes, err := j.KubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
    if err != nil {
        Failf("Error getting list of nodes: %v", err)
    }

    if len(nodes.Items) == 0 {
        Failf("No worker nodes are present in the cluster")
    }

    // Pin the deployment to a single schedulable node so the replacement pod
    // lands on the same node as the original.
    nodeSelectorLabels := map[string]string{}
    schedulableNodeFound := false

    for _, node := range nodes.Items {
        if !node.Spec.Unschedulable {
            schedulableNodeFound = true
            nodeSelectorLabels = node.Labels
            break
        }
    }

    if !schedulableNodeFound {
        Failf("No schedulable nodes found")
    }

    podRunningCommand := " while true; do true; done;"

    dataWritten := "Data written"

    writeCommand := "echo \"" + dataWritten + "\" >> /data/out.txt;"
    readCommand := "cat /data/out.txt"

    By("Creating a deployment")
    deploymentName := j.createDeploymentOnNodeAndWait(podRunningCommand, pvcName, ns, "data-persistence-deployment", 1, nodeSelectorLabels)

    deployment, err := j.KubeClient.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
    if err != nil {
        Failf("Error while fetching deployment %v: %v", deploymentName, err)
    }

    set := labels.Set(deployment.Spec.Selector.MatchLabels)
    pods, err := j.KubeClient.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: set.AsSelector().String()})
    if err != nil {
        Failf("Error getting list of pods: %v", err)
    }

    podName := pods.Items[0].Name

    By("Writing to the volume using the pod")
    _, _, err = j.ExecCommandOnPod(podName, writeCommand, nil, ns)
    if err != nil {
        Failf("Error executing write command on pod: %v", err)
    }

    By("Deleting the pod used to write to the volume")
    err = j.KubeClient.CoreV1().Pods(ns).Delete(podName, &metav1.DeleteOptions{})
    if err != nil {
        Failf("Error deleting pod: %v", err)
    }

    By("Waiting for pod to be restarted")
    err = j.waitTimeoutForDeploymentAvailable(deploymentName, ns, deploymentAvailableTimeout, 1)
    if err != nil {
        Failf("Error waiting for deployment to become available again: %v", err)
    }

    pods, err = j.KubeClient.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: set.AsSelector().String()})
    if err != nil {
        Failf("Error getting list of pods: %v", err)
    }

    podName = pods.Items[0].Name

    By("Reading from the volume using the pod and checking data integrity")
    stdout, _, err := j.ExecCommandOnPod(podName, readCommand, nil, ns)
    if err != nil {
        Failf("Error executing read command on pod: %v", err)
    }

    if dataWritten != strings.TrimSpace(stdout) {
        Failf("Written data not found on the volume, written: %v, found: %v", dataWritten, strings.TrimSpace(stdout))
    }
}
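One subtlety: right after the delete, the label-selector list can still include the terminating pod, so pods.Items[0] is not guaranteed to be the replacement replica. A minimal sketch of a stricter pick, assuming the same package and imports (runningPodName is a hypothetical helper, not part of this commit):

// runningPodName returns the first pod in the list that is running and not
// being torn down, which skips a replica still terminating after a delete.
func runningPodName(pods *v1.PodList) (string, bool) {
    for _, p := range pods.Items {
        if p.DeletionTimestamp == nil && p.Status.Phase == v1.PodRunning {
            return p.Name, true
        }
    }
    return "", false
}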
// ExecCommandOnPod runs the given shell command inside the named pod via the
// exec subresource and returns the captured stdout and stderr.
func (j *PVCTestJig) ExecCommandOnPod(podName string, command string, stdin io.Reader, ns string) (string, string, error) {
    cmd := []string{
        "sh",
        "-c",
        command,
    }

    req := j.KubeClient.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(ns).SubResource("exec")
    option := &v1.PodExecOptions{
        Command: cmd,
        Stdin:   true,
        Stdout:  true,
        Stderr:  true,
        TTY:     true,
    }
    if stdin == nil {
        option.Stdin = false
    }
    req.VersionedParams(
        option,
        scheme.ParameterCodec,
    )

    kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config_amd")
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
    if err != nil {
        return "", "", fmt.Errorf("error while retrieving kubeconfig: %v", err)
    }

    exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
    if err != nil {
        return "", "", fmt.Errorf("error while creating Executor: %v", err)
    }
    var stdout, stderr bytes.Buffer
    err = exec.Stream(remotecommand.StreamOptions{
        Stdin:  stdin,
        Stdout: &stdout,
        Stderr: &stderr,
    })
    if err != nil {
        return "", "", fmt.Errorf("error in Stream: %v", err)
    }

    return stdout.String(), stderr.String(), nil
}
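A minimal usage sketch, assuming it runs inside package framework against a pod that is already running; the pod and namespace names here are hypothetical:

func exampleExecCommandOnPod(j *PVCTestJig) {
    // Hypothetical pod "demo-pod" in namespace "default"; nil stdin disables
    // the stdin stream, as in the data-persistence check above.
    stdout, stderr, err := j.ExecCommandOnPod("demo-pod", "cat /data/out.txt", nil, "default")
    if err != nil {
        Failf("exec failed: %v (stderr: %q)", err, stderr)
    }
    Logf("pod output: %q", strings.TrimSpace(stdout))
}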
