Commit 3c62f73

Merge pull request #2136 from ffromani/ocp-split-l3-cache
OCPBUGS-44786: add support for the LLC alignment cpumanager policy option
2 parents 8ac36bf + 6fded69 commit 3c62f73

11 files changed: +1394 −351 lines

Diff for: pkg/kubelet/cm/cpumanager/cpu_assignment.go

+111 −7
@@ -118,6 +118,17 @@ func (n *numaFirst) takeFullSecondLevel() {
     n.acc.takeFullSockets()
 }
 
+// Sort the UncoreCaches within the NUMA nodes.
+func (a *cpuAccumulator) sortAvailableUncoreCaches() []int {
+    var result []int
+    for _, numa := range a.sortAvailableNUMANodes() {
+        uncore := a.details.UncoreInNUMANodes(numa).UnsortedList()
+        a.sort(uncore, a.details.CPUsInUncoreCaches)
+        result = append(result, uncore...)
+    }
+    return result
+}
+
 // If NUMA nodes are higher in the memory hierarchy than sockets, then just
 // sort the NUMA nodes directly, and return them.
 func (n *numaFirst) sortAvailableNUMANodes() []int {
@@ -318,6 +329,12 @@ func (a *cpuAccumulator) isSocketFree(socketID int) bool {
     return a.details.CPUsInSockets(socketID).Size() == a.topo.CPUsPerSocket()
 }
 
+// Returns true if the supplied UnCoreCache is fully available,
+// "fully available" means that all the CPUs in it are free.
+func (a *cpuAccumulator) isUncoreCacheFree(uncoreID int) bool {
+    return a.details.CPUsInUncoreCaches(uncoreID).Size() == a.topo.CPUDetails.CPUsInUncoreCaches(uncoreID).Size()
+}
+
 // Returns true if the supplied core is fully available in `a.details`.
 // "fully available" means that all the CPUs in it are free.
 func (a *cpuAccumulator) isCoreFree(coreID int) bool {
@@ -346,6 +363,17 @@ func (a *cpuAccumulator) freeSockets() []int {
     return free
 }
 
+// Returns free UncoreCache IDs as a slice sorted by sortAvailableUnCoreCache().
+func (a *cpuAccumulator) freeUncoreCache() []int {
+    free := []int{}
+    for _, uncore := range a.sortAvailableUncoreCaches() {
+        if a.isUncoreCacheFree(uncore) {
+            free = append(free, uncore)
+        }
+    }
+    return free
+}
+
 // Returns free core IDs as a slice sorted by sortAvailableCores().
 func (a *cpuAccumulator) freeCores() []int {
     free := []int{}
@@ -519,6 +547,62 @@ func (a *cpuAccumulator) takeFullSockets() {
     }
 }
 
+func (a *cpuAccumulator) takeFullUncore() {
+    for _, uncore := range a.freeUncoreCache() {
+        cpusInUncore := a.topo.CPUDetails.CPUsInUncoreCaches(uncore)
+        if !a.needsAtLeast(cpusInUncore.Size()) {
+            continue
+        }
+        klog.V(4).InfoS("takeFullUncore: claiming uncore", "uncore", uncore)
+        a.take(cpusInUncore)
+    }
+}
+
+func (a *cpuAccumulator) takePartialUncore(uncoreID int) {
+    numCoresNeeded := a.numCPUsNeeded / a.topo.CPUsPerCore()
+
+    // determine the N number of free cores (physical cpus) within the UncoreCache, then
+    // determine the M number of free cpus (virtual cpus) that correspond with the free cores
+    freeCores := a.details.CoresNeededInUncoreCache(numCoresNeeded, uncoreID)
+    freeCPUs := a.details.CPUsInCores(freeCores.UnsortedList()...)
+
+    // claim the cpus if the free cpus within the UncoreCache can satisfy the needed cpus
+    claimed := (a.numCPUsNeeded == freeCPUs.Size())
+    klog.V(4).InfoS("takePartialUncore: trying to claim partial uncore",
+        "uncore", uncoreID,
+        "claimed", claimed,
+        "needed", a.numCPUsNeeded,
+        "cores", freeCores.String(),
+        "cpus", freeCPUs.String())
+    if !claimed {
+        return
+
+    }
+    a.take(freeCPUs)
+}
+
+// First try to take full UncoreCache, if available and need is at least the size of the UncoreCache group.
+// Second try to take the partial UncoreCache if available and the request size can fit w/in the UncoreCache.
+func (a *cpuAccumulator) takeUncoreCache() {
+    numCPUsInUncore := a.topo.CPUsPerUncore()
+    for _, uncore := range a.sortAvailableUncoreCaches() {
+        // take full UncoreCache if the CPUs needed is greater than free UncoreCache size
+        if a.needsAtLeast(numCPUsInUncore) {
+            a.takeFullUncore()
+        }
+
+        if a.isSatisfied() {
+            return
+        }
+
+        // take partial UncoreCache if the CPUs needed is less than free UncoreCache size
+        a.takePartialUncore(uncore)
+        if a.isSatisfied() {
+            return
+        }
+    }
+}
+
 func (a *cpuAccumulator) takeFullCores() {
     for _, core := range a.freeCores() {
         cpusInCore := a.topo.CPUDetails.CPUsInCores(core)
@@ -637,6 +721,14 @@ func (a *cpuAccumulator) iterateCombinations(n []int, k int, f func([]int) LoopC
 // or the remaining number of CPUs to take after having taken full sockets and NUMA nodes is less
 // than a whole NUMA node, the function tries to take whole physical cores (cores).
 //
+// If `PreferAlignByUncoreCache` is enabled, the function will try to optimally assign Uncorecaches.
+// If `numCPUs` is larger than or equal to the total number of CPUs in a Uncorecache, and there are
+// free (i.e. all CPUs within the Uncorecache are free) Uncorecaches, the function takes as many entire
+// cores from free Uncorecaches as possible. If/Once `numCPUs` is smaller than the total number of
+// CPUs in a free Uncorecache, the function scans each Uncorecache index in numerical order to assign
+// cores that will fit within the Uncorecache. If `numCPUs` cannot fit within any Uncorecache, the
+// function tries to take whole physical cores.
+//
 // If `numCPUs` is bigger than the total number of CPUs in a core, and there are
 // free (i.e. all CPUs in them are free) cores, the function takes as many entire free cores as possible.
 // The cores are taken from one socket at a time, and the sockets are considered by
@@ -658,7 +750,7 @@ func (a *cpuAccumulator) iterateCombinations(n []int, k int, f func([]int) LoopC
 // the least amount of free CPUs to the one with the highest amount of free CPUs (i.e. in ascending
 // order of free CPUs). For any NUMA node, the cores are selected from the ones in the socket with
 // the least amount of free CPUs to the one with the highest amount of free CPUs.
-func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy) (cpuset.CPUSet, error) {
+func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy, preferAlignByUncoreCache bool) (cpuset.CPUSet, error) {
     acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy)
     if acc.isSatisfied() {
         return acc.result, nil
@@ -681,7 +773,17 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
         return acc.result, nil
     }
 
-    // 2. Acquire whole cores, if available and the container requires at least
+    // 2. If PreferAlignByUncoreCache is enabled, acquire whole UncoreCaches
+    // if available and the container requires at least a UncoreCache's-worth
+    // of CPUs. Otherwise, acquire CPUs from the least amount of UncoreCaches.
+    if preferAlignByUncoreCache {
+        acc.takeUncoreCache()
+        if acc.isSatisfied() {
+            return acc.result, nil
+        }
+    }
+
+    // 3. Acquire whole cores, if available and the container requires at least
     // a core's-worth of CPUs.
     // If `CPUSortingStrategySpread` is specified, skip taking the whole core.
     if cpuSortingStrategy != CPUSortingStrategySpread {
@@ -691,7 +793,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
         }
     }
 
-    // 3. Acquire single threads, preferring to fill partially-allocated cores
+    // 4. Acquire single threads, preferring to fill partially-allocated cores
     // on the same sockets as the whole cores we have already taken in this
     // allocation.
     acc.takeRemainingCPUs()
@@ -769,8 +871,10 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
     // If the number of CPUs requested cannot be handed out in chunks of
     // 'cpuGroupSize', then we just call out the packing algorithm since we
     // can't distribute CPUs in this chunk size.
+    // PreferAlignByUncoreCache feature not implemented here yet and set to false.
+    // Support for PreferAlignByUncoreCache to be done at beta release.
     if (numCPUs % cpuGroupSize) != 0 {
-        return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy)
+        return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
     }
 
     // Otherwise build an accumulator to start allocating CPUs from.
@@ -953,7 +1057,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
     // size 'cpuGroupSize' from 'bestCombo'.
     distribution := (numCPUs / len(bestCombo) / cpuGroupSize) * cpuGroupSize
     for _, numa := range bestCombo {
-        cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy)
+        cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy, false)
         acc.take(cpus)
     }
 
@@ -968,7 +1072,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
         if acc.details.CPUsInNUMANodes(numa).Size() < cpuGroupSize {
             continue
         }
-        cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy)
+        cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy, false)
         acc.take(cpus)
         remainder -= cpuGroupSize
     }
@@ -992,5 +1096,5 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 
     // If we never found a combination of NUMA nodes that we could properly
     // distribute CPUs across, fall back to the packing algorithm.
-    return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy)
+    return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
 }
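
The new accumulator order is easier to see outside the kubelet's types. Below is a minimal standalone Go sketch of the selection order this patch adds — claim whole UncoreCaches while the request still spans at least one cache, then one partial cache only if the remainder fits entirely inside it. The two-cache topology, the take function, and its signature are all hypothetical illustrations of the ordering, not the kubelet implementation, which additionally honors sorting strategies and falls back to whole cores and single threads.

package main

import "fmt"

// take claims CPUs from uncore caches in the given order: full caches first,
// then a single partial cache that can hold the entire remainder, which is
// what keeps the allocation aligned to one last-level cache.
func take(order []int, caches map[int][]int, need int) []int {
	var result []int
	// 1. Claim whole caches while the request still covers at least one cache.
	for _, id := range order {
		free := caches[id]
		if len(free) > 0 && need >= len(free) {
			result = append(result, free...)
			need -= len(free)
			caches[id] = nil
		}
	}
	// 2. Claim a partial cache only if the remainder fits entirely inside it.
	for _, id := range order {
		if free := caches[id]; need > 0 && need <= len(free) {
			result = append(result, free[:need]...)
			need = 0
		}
	}
	// The real code falls back to whole cores and single threads when the
	// remainder fits in no cache; this sketch just returns what it claimed.
	return result
}

func main() {
	// Hypothetical socket: two uncore caches with four free CPUs each.
	caches := map[int][]int{0: {0, 1, 2, 3}, 1: {4, 5, 6, 7}}
	fmt.Println(take([]int{0, 1}, caches, 6)) // [0 1 2 3 4 5]
}

With need=6 the first loop drains cache 0 and the two leftover CPUs come from a single cache rather than straddling two LLC domains; with need=3 the first loop never fires and the whole request lands in one cache, which is exactly the alignment the option is after.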

Diff for: pkg/kubelet/cm/cpumanager/cpu_assignment_test.go

+75 −2
@@ -668,6 +668,79 @@ func TestTakeByTopologyNUMAPacked(t *testing.T) {
             "",
             mustParseCPUSet(t, "0-29,40-69,30,31,70,71"),
         },
+        // Test cases for PreferAlignByUncoreCache
+        {
+            "take cpus from two full UncoreCaches and partial from a single UncoreCache",
+            topoUncoreSingleSocketNoSMT,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            mustParseCPUSet(t, "1-15"),
+            10,
+            "",
+            cpuset.New(1, 2, 4, 5, 6, 7, 8, 9, 10, 11),
+        },
+        {
+            "take one cpu from dual socket with HT - core from Socket 0",
+            topoDualSocketHT,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
+            1,
+            "",
+            cpuset.New(2),
+        },
+        {
+            "take first available UncoreCache from first socket",
+            topoUncoreDualSocketNoSMT,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            mustParseCPUSet(t, "0-15"),
+            4,
+            "",
+            cpuset.New(0, 1, 2, 3),
+        },
+        {
+            "take all available UncoreCache from first socket",
+            topoUncoreDualSocketNoSMT,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            mustParseCPUSet(t, "2-15"),
+            6,
+            "",
+            cpuset.New(2, 3, 4, 5, 6, 7),
+        },
+        {
+            "take first available UncoreCache from second socket",
+            topoUncoreDualSocketNoSMT,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            mustParseCPUSet(t, "8-15"),
+            4,
+            "",
+            cpuset.New(8, 9, 10, 11),
+        },
+        {
+            "take first available UncoreCache from available NUMA",
+            topoUncoreSingleSocketMultiNuma,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            mustParseCPUSet(t, "3,4-8,12"),
+            2,
+            "",
+            cpuset.New(4, 5),
+        },
+        {
+            "take cpus from best available UncoreCache group of multi uncore cache single socket - SMT enabled",
+            topoUncoreSingleSocketSMT,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            mustParseCPUSet(t, "2-3,10-11,4-7,12-15"),
+            6,
+            "",
+            cpuset.New(4, 5, 6, 12, 13, 14),
+        },
+        {
+            "take cpus from multiple UncoreCache of single socket - SMT enabled",
+            topoUncoreSingleSocketSMT,
+            StaticPolicyOptions{PreferAlignByUncoreCacheOption: true},
+            mustParseCPUSet(t, "1-7,9-15"),
+            10,
+            "",
+            mustParseCPUSet(t, "4-7,12-15,1,9"),
+        },
     }...)
 
     for _, tc := range testCases {
@@ -677,7 +750,7 @@ func TestTakeByTopologyNUMAPacked(t *testing.T) {
             strategy = CPUSortingStrategySpread
         }
 
-        result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy)
+        result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy, tc.opts.PreferAlignByUncoreCacheOption)
         if tc.expErr != "" && err != nil && err.Error() != tc.expErr {
             t.Errorf("expected error to be [%v] but it was [%v]", tc.expErr, err)
         }
@@ -778,7 +851,7 @@ func TestTakeByTopologyWithSpreadPhysicalCPUsPreferredOption(t *testing.T) {
         if tc.opts.DistributeCPUsAcrossCores {
             strategy = CPUSortingStrategySpread
         }
-        result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy)
+        result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy, tc.opts.PreferAlignByUncoreCacheOption)
         if tc.expErr != "" && err.Error() != tc.expErr {
             t.Errorf("testCase %q failed, expected error to be [%v] but it was [%v]", tc.description, tc.expErr, err)
         }
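
The cases above build expected sets with cpuset.New and a mustParseCPUSet helper defined elsewhere in this test file. A plausible sketch of such a helper, assuming the k8s.io/utils/cpuset package these tests use; the exact error handling in the real helper may differ:

import (
	"testing"

	"k8s.io/utils/cpuset"
)

// mustParseCPUSet turns a kernel-style CPU list such as "1-7,9-15" into a
// cpuset.CPUSet, failing the test immediately on malformed input.
func mustParseCPUSet(t *testing.T, s string) cpuset.CPUSet {
	t.Helper()
	cpus, err := cpuset.Parse(s)
	if err != nil {
		t.Fatalf("could not parse cpuset %q: %v", s, err)
	}
	return cpus
}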

Diff for: pkg/kubelet/cm/cpumanager/cpu_manager_test.go

+12 −8
@@ -651,20 +651,24 @@ func TestCPUManagerGenerate(t *testing.T) {
             {
                 Cores: []cadvisorapi.Core{
                     {
-                        Id:      0,
-                        Threads: []int{0},
+                        Id:           0,
+                        Threads:      []int{0},
+                        UncoreCaches: []cadvisorapi.Cache{{Id: 1}},
                     },
                     {
-                        Id:      1,
-                        Threads: []int{1},
+                        Id:           1,
+                        Threads:      []int{1},
+                        UncoreCaches: []cadvisorapi.Cache{{Id: 1}},
                     },
                     {
-                        Id:      2,
-                        Threads: []int{2},
+                        Id:           2,
+                        Threads:      []int{2},
+                        UncoreCaches: []cadvisorapi.Cache{{Id: 1}},
                     },
                     {
-                        Id:      3,
-                        Threads: []int{3},
+                        Id:           3,
+                        Threads:      []int{3},
+                        UncoreCaches: []cadvisorapi.Cache{{Id: 1}},
                     },
                 },
             },
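
This fixture change gives every fake core an uncore cache ID so that topology discovery can group cores by last-level cache; with all four cores reporting Id 1, they form a single LLC domain. A minimal sketch of building such a machine description with the cadvisor v1 API is below — the field names come from the diff above, while the surrounding program and the single-NUMA-node layout are illustrative assumptions:

package main

import (
	"fmt"

	cadvisorapi "github.com/google/cadvisor/info/v1"
)

func main() {
	// Four single-threaded cores that all report the same uncore cache
	// (Id 1), i.e. one shared last-level-cache domain, as in the fixture.
	cores := make([]cadvisorapi.Core, 0, 4)
	for i := 0; i < 4; i++ {
		cores = append(cores, cadvisorapi.Core{
			Id:           i,
			Threads:      []int{i},
			UncoreCaches: []cadvisorapi.Cache{{Id: 1}},
		})
	}
	machine := cadvisorapi.MachineInfo{
		NumCores: 4,
		Topology: []cadvisorapi.Node{{Id: 0, Cores: cores}},
	}
	fmt.Printf("%d cores in %d NUMA node(s)\n", machine.NumCores, len(machine.Topology))
}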
