Skip to content

Commit fdca80f

Browse files
committed
manually revert kubernetes#109877
1 parent c75e833 commit fdca80f

File tree

7 files changed

+1
-594
lines changed

7 files changed

+1
-594
lines changed

pkg/scheduler/framework/plugins/volumebinding/binder.go

Lines changed: 0 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,6 @@ import (
4848
"k8s.io/kubernetes/pkg/features"
4949
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
5050
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
51-
"k8s.io/kubernetes/pkg/volume/util"
5251
)
5352

5453
// ConflictReason is used for the special strings which explain why
@@ -130,8 +129,6 @@ type InTreeToCSITranslator interface {
130129
// 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
131130
// a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked
132131
// here, pod volume information will be saved in current scheduling cycle state for later use.
133-
// If pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce
134-
// down the list of eligible nodes based on the bound PV's NodeAffinity (if any).
135132
// b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here.
136133
// c. Invokes all score plugins. Future/TBD
137134
// d. Selects the best node for the Pod.
@@ -154,14 +151,6 @@ type SchedulerVolumeBinder interface {
154151
// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
155152
GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)
156153

157-
// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
158-
// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
159-
// in subsequent scheduling stages.
160-
//
161-
// If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made
162-
// and all nodes should be considered.
163-
GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string])
164-
165154
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
166155
// node and returns pod's volumes information.
167156
//
@@ -384,55 +373,6 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
384373
return
385374
}
386375

387-
// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
388-
// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
389-
// in subsequent scheduling stages.
390-
//
391-
// Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes
392-
// should be considered.
393-
func (b *volumeBinder) GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
394-
if len(boundClaims) == 0 {
395-
return
396-
}
397-
398-
var errs []error
399-
for _, pvc := range boundClaims {
400-
pvName := pvc.Spec.VolumeName
401-
pv, err := b.pvCache.GetPV(pvName)
402-
if err != nil {
403-
errs = append(errs, err)
404-
continue
405-
}
406-
407-
// if the PersistentVolume is local and has node affinity matching specific node(s),
408-
// add them to the eligible nodes
409-
nodeNames := util.GetLocalPersistentVolumeNodeNames(pv)
410-
if len(nodeNames) != 0 {
411-
// on the first found list of eligible nodes for the local PersistentVolume,
412-
// insert to the eligible node set.
413-
if eligibleNodes == nil {
414-
eligibleNodes = sets.New(nodeNames...)
415-
} else {
416-
// for subsequent finding of eligible nodes for the local PersistentVolume,
417-
// take the intersection of the nodes with the existing eligible nodes
418-
// for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2,
419-
// then the eligible node list should be empty.
420-
eligibleNodes = eligibleNodes.Intersection(sets.New(nodeNames...))
421-
}
422-
}
423-
}
424-
425-
if len(errs) > 0 {
426-
logger.V(4).Info("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs)
427-
return nil
428-
}
429-
430-
if eligibleNodes != nil {
431-
logger.V(4).Info("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes)
432-
}
433-
return
434-
}
435-
436376
// AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
437377
// volume information for the chosen node, and:
438378
// 1. Update the pvCache with the new prebound PV.

pkg/scheduler/framework/plugins/volumebinding/binder_test.go

Lines changed: 0 additions & 141 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ import (
2020
"context"
2121
"fmt"
2222
"os"
23-
"reflect"
2423
"sort"
2524
"testing"
2625
"time"
@@ -32,7 +31,6 @@ import (
3231
"k8s.io/apimachinery/pkg/api/resource"
3332
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
3433
"k8s.io/apimachinery/pkg/types"
35-
"k8s.io/apimachinery/pkg/util/sets"
3634
"k8s.io/apimachinery/pkg/util/wait"
3735
"k8s.io/apimachinery/pkg/watch"
3836
"k8s.io/client-go/informers"
@@ -63,9 +61,6 @@ var (
6361
boundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass)
6462
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass)
6563
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass)
66-
localPreboundPVC1a = makeTestPVC("local-prebound-pvc-1a", "1G", "", pvcPrebound, "local-pv-node1a", "1", &waitClass)
67-
localPreboundPVC1b = makeTestPVC("local-prebound-pvc-1b", "1G", "", pvcPrebound, "local-pv-node1b", "1", &waitClass)
68-
localPreboundPVC2a = makeTestPVC("local-prebound-pvc-2a", "1G", "", pvcPrebound, "local-pv-node2a", "1", &waitClass)
6964

7065
// PVCs for dynamic provisioning
7166
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
@@ -97,9 +92,6 @@ var (
9792
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass)
9893
pvBoundImmediate = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass)
9994
pvBoundImmediateNode2 = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass)
100-
localPVNode1a = makeLocalPV("local-pv-node1a", "node1", "5G", "1", nil, waitClass)
101-
localPVNode1b = makeLocalPV("local-pv-node1b", "node1", "10G", "1", nil, waitClass)
102-
localPVNode2a = makeLocalPV("local-pv-node2a", "node2", "5G", "1", nil, waitClass)
10395

10496
// PVs for CSI migration
10597
migrationPVBound = makeTestPVForCSIMigration(zone1Labels, boundMigrationPVC, true)
@@ -709,12 +701,6 @@ func makeTestPVForCSIMigration(labels map[string]string, pvc *v1.PersistentVolum
709701
return pv
710702
}
711703

712-
func makeLocalPV(name, node, capacity, version string, boundToPVC *v1.PersistentVolumeClaim, className string) *v1.PersistentVolume {
713-
pv := makeTestPV(name, node, capacity, version, boundToPVC, className)
714-
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key = v1.LabelHostname
715-
return pv
716-
}
717-
718704
func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim {
719705
newPVC := pvc.DeepCopy()
720706
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnSelectedNode, node)
@@ -2326,130 +2312,3 @@ func TestCapacity(t *testing.T) {
23262312
})
23272313
}
23282314
}
2329-
2330-
func TestGetEligibleNodes(t *testing.T) {
2331-
type scenarioType struct {
2332-
// Inputs
2333-
pvcs []*v1.PersistentVolumeClaim
2334-
pvs []*v1.PersistentVolume
2335-
nodes []*v1.Node
2336-
2337-
// Expected return values
2338-
eligibleNodes sets.Set[string]
2339-
}
2340-
2341-
scenarios := map[string]scenarioType{
2342-
"no-bound-claims": {},
2343-
"no-nodes-found": {
2344-
pvcs: []*v1.PersistentVolumeClaim{
2345-
preboundPVC,
2346-
preboundPVCNode1a,
2347-
},
2348-
},
2349-
"pv-not-found": {
2350-
pvcs: []*v1.PersistentVolumeClaim{
2351-
preboundPVC,
2352-
preboundPVCNode1a,
2353-
},
2354-
nodes: []*v1.Node{
2355-
node1,
2356-
},
2357-
},
2358-
"node-affinity-mismatch": {
2359-
pvcs: []*v1.PersistentVolumeClaim{
2360-
preboundPVC,
2361-
preboundPVCNode1a,
2362-
},
2363-
pvs: []*v1.PersistentVolume{
2364-
pvNode1a,
2365-
},
2366-
nodes: []*v1.Node{
2367-
node1,
2368-
node2,
2369-
},
2370-
},
2371-
"local-pv-with-node-affinity": {
2372-
pvcs: []*v1.PersistentVolumeClaim{
2373-
localPreboundPVC1a,
2374-
localPreboundPVC1b,
2375-
},
2376-
pvs: []*v1.PersistentVolume{
2377-
localPVNode1a,
2378-
localPVNode1b,
2379-
},
2380-
nodes: []*v1.Node{
2381-
node1,
2382-
node2,
2383-
},
2384-
eligibleNodes: sets.New("node1"),
2385-
},
2386-
"multi-local-pv-with-different-nodes": {
2387-
pvcs: []*v1.PersistentVolumeClaim{
2388-
localPreboundPVC1a,
2389-
localPreboundPVC1b,
2390-
localPreboundPVC2a,
2391-
},
2392-
pvs: []*v1.PersistentVolume{
2393-
localPVNode1a,
2394-
localPVNode1b,
2395-
localPVNode2a,
2396-
},
2397-
nodes: []*v1.Node{
2398-
node1,
2399-
node2,
2400-
},
2401-
eligibleNodes: sets.New[string](),
2402-
},
2403-
"local-and-non-local-pv": {
2404-
pvcs: []*v1.PersistentVolumeClaim{
2405-
localPreboundPVC1a,
2406-
localPreboundPVC1b,
2407-
preboundPVC,
2408-
immediateBoundPVC,
2409-
},
2410-
pvs: []*v1.PersistentVolume{
2411-
localPVNode1a,
2412-
localPVNode1b,
2413-
pvNode1a,
2414-
pvBoundImmediate,
2415-
pvBoundImmediateNode2,
2416-
},
2417-
nodes: []*v1.Node{
2418-
node1,
2419-
node2,
2420-
},
2421-
eligibleNodes: sets.New("node1"),
2422-
},
2423-
}
2424-
2425-
run := func(t *testing.T, scenario scenarioType) {
2426-
logger, ctx := ktesting.NewTestContext(t)
2427-
ctx, cancel := context.WithCancel(ctx)
2428-
defer cancel()
2429-
2430-
// Setup
2431-
testEnv := newTestBinder(t, ctx)
2432-
testEnv.initVolumes(scenario.pvs, scenario.pvs)
2433-
2434-
testEnv.initNodes(scenario.nodes)
2435-
testEnv.initClaims(scenario.pvcs, scenario.pvcs)
2436-
2437-
// Execute
2438-
eligibleNodes := testEnv.binder.GetEligibleNodes(logger, scenario.pvcs)
2439-
2440-
// Validate
2441-
if reflect.DeepEqual(scenario.eligibleNodes, eligibleNodes) {
2442-
fmt.Println("foo")
2443-
}
2444-
2445-
if compDiff := cmp.Diff(scenario.eligibleNodes, eligibleNodes, cmp.Comparer(func(a, b sets.Set[string]) bool {
2446-
return reflect.DeepEqual(a, b)
2447-
})); compDiff != "" {
2448-
t.Errorf("Unexpected eligible nodes (-want +got):\n%s", compDiff)
2449-
}
2450-
}
2451-
2452-
for name, scenario := range scenarios {
2453-
t.Run(name, func(t *testing.T) { run(t, scenario) })
2454-
}
2455-
}

pkg/scheduler/framework/plugins/volumebinding/fake_binder.go

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ import (
2020
"context"
2121

2222
v1 "k8s.io/api/core/v1"
23-
"k8s.io/apimachinery/pkg/util/sets"
2423
"k8s.io/klog/v2"
2524
)
2625

@@ -55,11 +54,6 @@ func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVo
5554
return &PodVolumeClaims{}, nil
5655
}
5756

58-
// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes.
59-
func (b *FakeVolumeBinder) GetEligibleNodes(_ klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
60-
return nil
61-
}
62-
6357
// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
6458
func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
6559
return nil, b.config.FindReasons, b.config.FindErr

pkg/scheduler/framework/plugins/volumebinding/volume_binding.go

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -340,14 +340,6 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
340340
status.AppendReason("pod has unbound immediate PersistentVolumeClaims")
341341
return nil, status
342342
}
343-
// Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims.
344-
var result *framework.PreFilterResult
345-
if eligibleNodes := pl.Binder.GetEligibleNodes(logger, podVolumeClaims.boundClaims); eligibleNodes != nil {
346-
result = &framework.PreFilterResult{
347-
NodeNames: eligibleNodes,
348-
}
349-
}
350-
351343
state.Write(stateKey, &stateData{
352344
podVolumesByNode: make(map[string]*PodVolumes),
353345
podVolumeClaims: &PodVolumeClaims{
@@ -356,7 +348,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
356348
unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding,
357349
},
358350
})
359-
return result, nil
351+
return nil, nil
360352
}
361353

362354
// PreFilterExtensions returns prefilter extensions, pod add and remove.

pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go

Lines changed: 0 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@ import (
2727
storagev1 "k8s.io/api/storage/v1"
2828
"k8s.io/apimachinery/pkg/api/resource"
2929
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
30-
"k8s.io/apimachinery/pkg/util/sets"
3130
"k8s.io/client-go/informers"
3231
"k8s.io/client-go/kubernetes/fake"
3332
"k8s.io/klog/v2/ktesting"
@@ -127,43 +126,6 @@ func TestVolumeBinding(t *testing.T) {
127126
},
128127
wantPreScoreStatus: framework.NewStatus(framework.Skip),
129128
},
130-
{
131-
name: "all bound with local volumes",
132-
pod: makePod("pod-a").withPVCVolume("pvc-a", "volume-a").withPVCVolume("pvc-b", "volume-b").Pod,
133-
nodes: []*v1.Node{
134-
makeNode("node-a").Node,
135-
},
136-
pvcs: []*v1.PersistentVolumeClaim{
137-
makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
138-
makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
139-
},
140-
pvs: []*v1.PersistentVolume{
141-
makePV("pv-a", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
142-
v1.LabelHostname: {"node-a"},
143-
}).PersistentVolume,
144-
makePV("pv-b", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
145-
v1.LabelHostname: {"node-a"},
146-
}).PersistentVolume,
147-
},
148-
wantPreFilterResult: &framework.PreFilterResult{
149-
NodeNames: sets.New("node-a"),
150-
},
151-
wantStateAfterPreFilter: &stateData{
152-
podVolumeClaims: &PodVolumeClaims{
153-
boundClaims: []*v1.PersistentVolumeClaim{
154-
makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
155-
makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
156-
},
157-
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{},
158-
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
159-
},
160-
podVolumesByNode: map[string]*PodVolumes{},
161-
},
162-
wantFilterStatus: []*framework.Status{
163-
nil,
164-
},
165-
wantPreScoreStatus: framework.NewStatus(framework.Skip),
166-
},
167129
{
168130
name: "PVC does not exist",
169131
pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,

0 commit comments

Comments (0)