
Commit 478a6f9

Add tests for PreferSameNode
1 parent 0ee6b0d commit 478a6f9


1 file changed: +110 -0 lines


test/e2e/network/traffic_distribution.go

@@ -372,4 +372,114 @@ var _ = common.SIGDescribe("Traffic Distribution", func() {
 		checkTrafficDistribution(ctx, clientPods)
 	})
 
+	framework.It("should route traffic to an endpoint on the same node or fall back to same zone when using PreferSameNode", framework.WithFeatureGate(features.PreferSameTrafficDistribution), func(ctx context.Context) {
+		ginkgo.By("finding a set of nodes for the test")
+		zone1Nodes, zone2Nodes, zone3Nodes := getNodesForMultiNode(ctx)
+
+		var clientPods []*clientPod
+		var serverPods []*serverPod
+
+		// The first zone: a client and a server on each node. Each client only
+		// talks to the server on the same node.
+		endpointsForZone := []*serverPod{
+			{node: zone1Nodes[0]},
+			{node: zone1Nodes[1]},
+		}
+		clientPods = append(clientPods,
+			&clientPod{
+				node:      zone1Nodes[0],
+				endpoints: []*serverPod{endpointsForZone[0]},
+			},
+			&clientPod{
+				node:      zone1Nodes[1],
+				endpoints: []*serverPod{endpointsForZone[1]},
+			},
+		)
+		serverPods = append(serverPods, endpointsForZone...)
+
+		// The second zone: a client on one node and a server on the other. The
+		// client should fall back to connecting (only) to its same-zone endpoint.
+		endpointsForZone = []*serverPod{
+			{node: zone2Nodes[1]},
+		}
+		clientPods = append(clientPods,
+			&clientPod{
+				node:      zone2Nodes[0],
+				endpoints: endpointsForZone,
+			},
+		)
+		serverPods = append(serverPods, endpointsForZone...)
+
+		// The third zone: just a client. Since it has neither a same-node nor a
+		// same-zone endpoint, it should connect to all endpoints.
+		clientPods = append(clientPods,
+			&clientPod{
+				node:      zone3Nodes[0],
+				endpoints: serverPods,
+			},
+		)
+
+		svc := createService(ctx, v1.ServiceTrafficDistributionPreferSameNode)
+		createPods(ctx, svc, clientPods, serverPods)
+		checkTrafficDistribution(ctx, clientPods)
+	})
+
+	framework.It("should route traffic to an endpoint on the same node when using PreferSameNode and fall back when the endpoint becomes unavailable", framework.WithFeatureGate(features.PreferSameTrafficDistribution), func(ctx context.Context) {
+		ginkgo.By("finding a set of nodes for the test")
+		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
+		framework.ExpectNoError(err)
+		if len(nodeList.Items) < 2 {
+			e2eskipper.Skipf("have %d schedulable nodes, need at least 2", len(nodeList.Items))
+		}
+		nodes := nodeList.Items[:2]
+
+		// One client and one server on each node
+		serverPods := []*serverPod{
+			{node: &nodes[0]},
+			{node: &nodes[1]},
+		}
+		clientPods := []*clientPod{
+			{
+				node:      &nodes[0],
+				endpoints: []*serverPod{serverPods[0]},
+			},
+			{
+				node:      &nodes[1],
+				endpoints: []*serverPod{serverPods[1]},
+			},
+		}
+
+		svc := createService(ctx, v1.ServiceTrafficDistributionPreferSameNode)
+		createPods(ctx, svc, clientPods, serverPods)
+
+		ginkgo.By("ensuring that each client talks to its same-node endpoint when both endpoints exist")
+		checkTrafficDistribution(ctx, clientPods)
+
+		ginkgo.By("killing the server pod on the first node and waiting for the EndpointSlices to be updated")
+		err = c.CoreV1().Pods(f.Namespace.Name).Delete(ctx, serverPods[0].pod.Name, metav1.DeleteOptions{})
+		framework.ExpectNoError(err)
+		err = framework.WaitForServiceEndpointsNum(ctx, c, svc.Namespace, svc.Name, 1, 1*time.Second, e2eservice.ServiceEndpointsTimeout)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("ensuring that both clients talk to the remaining endpoint when only one endpoint exists")
+		serverPods[0].pod = nil
+		clientPods[0].endpoints = []*serverPod{serverPods[1]}
+		checkTrafficDistribution(ctx, clientPods)
+
+		ginkgo.By("recreating the missing server pod and waiting for the EndpointSlices to be updated")
+		// We can't use createPods() here because if we only tell it about
+		// serverPods[0] and not serverPods[1] it will expect there to be only one
+		// endpoint.
+		pod := e2epod.NewAgnhostPod(f.Namespace.Name, "server-0-new", nil, nil, nil, "serve-hostname")
+		nodeSelection := e2epod.NodeSelection{Name: serverPods[0].node.Name}
+		e2epod.SetNodeSelection(&pod.Spec, nodeSelection)
+		pod.Labels = svc.Spec.Selector
+		serverPods[0].pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
+		err = framework.WaitForServiceEndpointsNum(ctx, c, svc.Namespace, svc.Name, 2, 1*time.Second, e2eservice.ServiceEndpointsTimeout)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("ensuring that each client talks only to its same-node endpoint again")
+		clientPods[0].endpoints = []*serverPod{serverPods[0]}
+		checkTrafficDistribution(ctx, clientPods)
+	})
 })
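
For context, below is a minimal, illustrative sketch (not part of this commit) of the kind of Service these tests exercise, built directly with the core/v1 API. The createService() helper in this file may construct it differently; the service name, selector, and port here are hypothetical. The routing behavior described in the comment is the one the two tests above verify: same-node endpoint first, then same-zone, then any endpoint.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// preferSameNodeService returns a Service whose spec.trafficDistribution is
// set to PreferSameNode. With the PreferSameTrafficDistribution feature gate
// enabled, traffic is routed to an endpoint on the client's own node when one
// exists, falling back to same-zone endpoints, and then to any endpoint.
func preferSameNodeService() *v1.Service {
	return &v1.Service{
		// Name, selector, and port are hypothetical placeholders.
		ObjectMeta: metav1.ObjectMeta{Name: "traffic-dist"},
		Spec: v1.ServiceSpec{
			Selector:            map[string]string{"app": "traffic-dist-server"},
			Ports:               []v1.ServicePort{{Port: 80}},
			TrafficDistribution: ptr.To(v1.ServiceTrafficDistributionPreferSameNode),
		},
	}
}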
