@@ -118,6 +118,17 @@ func (n *numaFirst) takeFullSecondLevel() {
 	n.acc.takeFullSockets()
 }
 
+// Sort the UncoreCaches within the NUMA nodes.
+func (a *cpuAccumulator) sortAvailableUncoreCaches() []int {
+	var result []int
+	for _, numa := range a.sortAvailableNUMANodes() {
+		uncore := a.details.UncoreInNUMANodes(numa).UnsortedList()
+		a.sort(uncore, a.details.CPUsInUncoreCaches)
+		result = append(result, uncore...)
+	}
+	return result
+}
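+
+// Illustrative note (hypothetical topology): the ordering is NUMA-major. On a
+// machine where NUMA node 0 holds UncoreCaches {0, 1} and NUMA node 1 holds
+// {2}, the node-0 caches are returned first (in a.sort order), followed by
+// the node-1 caches, e.g. [0, 1, 2].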
+
 // If NUMA nodes are higher in the memory hierarchy than sockets, then just
 // sort the NUMA nodes directly, and return them.
 func (n *numaFirst) sortAvailableNUMANodes() []int {
@@ -318,6 +329,12 @@ func (a *cpuAccumulator) isSocketFree(socketID int) bool {
 	return a.details.CPUsInSockets(socketID).Size() == a.topo.CPUsPerSocket()
 }
 
+// Returns true if the supplied UncoreCache is fully available in `a.details`.
+// "fully available" means that all the CPUs in it are free.
+func (a *cpuAccumulator) isUncoreCacheFree(uncoreID int) bool {
+	return a.details.CPUsInUncoreCaches(uncoreID).Size() == a.topo.CPUDetails.CPUsInUncoreCaches(uncoreID).Size()
+}
+
 // Returns true if the supplied core is fully available in `a.details`.
 // "fully available" means that all the CPUs in it are free.
 func (a *cpuAccumulator) isCoreFree(coreID int) bool {
@@ -346,6 +363,17 @@ func (a *cpuAccumulator) freeSockets() []int {
 	return free
 }
 
+// Returns free UncoreCache IDs as a slice sorted by sortAvailableUncoreCaches().
+func (a *cpuAccumulator) freeUncoreCache() []int {
+	free := []int{}
+	for _, uncore := range a.sortAvailableUncoreCaches() {
+		if a.isUncoreCacheFree(uncore) {
+			free = append(free, uncore)
+		}
+	}
+	return free
+}
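+
+// Note: the result order is inherited from sortAvailableUncoreCaches(), so
+// callers such as takeFullUncore() visit fully-free caches NUMA node by
+// NUMA node.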
+
 // Returns free core IDs as a slice sorted by sortAvailableCores().
 func (a *cpuAccumulator) freeCores() []int {
 	free := []int{}
@@ -519,6 +547,62 @@ func (a *cpuAccumulator) takeFullSockets() {
 	}
 }
 
+// Claims, in sorted order, every free UncoreCache whose full size still fits
+// within the number of CPUs left to take.
+func (a *cpuAccumulator) takeFullUncore() {
+	for _, uncore := range a.freeUncoreCache() {
+		cpusInUncore := a.topo.CPUDetails.CPUsInUncoreCaches(uncore)
+		if !a.needsAtLeast(cpusInUncore.Size()) {
+			continue
+		}
+		klog.V(4).InfoS("takeFullUncore: claiming uncore", "uncore", uncore)
+		a.take(cpusInUncore)
+	}
+}
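+
+// Illustrative example (hypothetical sizes): with 16 CPUs still needed and
+// two free 8-CPU UncoreCaches, both caches are claimed in turn; once fewer
+// than 8 CPUs remain needed, needsAtLeast(8) fails and any remaining free
+// caches are skipped.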
+
+func (a *cpuAccumulator) takePartialUncore(uncoreID int) {
+	numCoresNeeded := a.numCPUsNeeded / a.topo.CPUsPerCore()
+
+	// determine the N number of free cores (physical cpus) within the UncoreCache, then
+	// determine the M number of free cpus (virtual cpus) that correspond to the free cores
+	freeCores := a.details.CoresNeededInUncoreCache(numCoresNeeded, uncoreID)
+	freeCPUs := a.details.CPUsInCores(freeCores.UnsortedList()...)
+
+	// claim the cpus if the free cpus within the UncoreCache can satisfy the needed cpus
+	claimed := (a.numCPUsNeeded == freeCPUs.Size())
+	klog.V(4).InfoS("takePartialUncore: trying to claim partial uncore",
+		"uncore", uncoreID,
+		"claimed", claimed,
+		"needed", a.numCPUsNeeded,
+		"cores", freeCores.String(),
+		"cpus", freeCPUs.String())
+	if !claimed {
+		return
+	}
+	a.take(freeCPUs)
+}
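+
+// Illustrative example (hypothetical sizes, assuming CoresNeededInUncoreCache
+// returns up to numCoresNeeded free cores in the given cache): needing 4 CPUs
+// with 2 threads per core, numCoresNeeded is 2; if the UncoreCache has 2 free
+// cores, freeCPUs holds exactly 4 CPUs and the claim succeeds, otherwise the
+// function returns without taking anything.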
+
+// First, try to take whole UncoreCaches, if available and the request is at
+// least the size of an UncoreCache group. Then try to take a partial
+// UncoreCache if the remaining request can fit within one UncoreCache.
+func (a *cpuAccumulator) takeUncoreCache() {
+	numCPUsInUncore := a.topo.CPUsPerUncore()
+	for _, uncore := range a.sortAvailableUncoreCaches() {
+		// take full UncoreCaches if the number of CPUs needed is at least the UncoreCache size
+		if a.needsAtLeast(numCPUsInUncore) {
+			a.takeFullUncore()
+		}
+
+		if a.isSatisfied() {
+			return
+		}
+
+		// take a partial UncoreCache if the number of CPUs needed is less than the free UncoreCache size
+		a.takePartialUncore(uncore)
+		if a.isSatisfied() {
+			return
+		}
+	}
+}
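+
+// Illustrative flow (hypothetical 8-CPU UncoreCaches): a request for 10 CPUs
+// first claims one full cache via takeFullUncore (leaving 2 CPUs needed),
+// then a later iteration places the remaining 2 CPUs inside the next cache
+// with a free core via takePartialUncore.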
+
 func (a *cpuAccumulator) takeFullCores() {
 	for _, core := range a.freeCores() {
 		cpusInCore := a.topo.CPUDetails.CPUsInCores(core)
@@ -637,6 +721,14 @@ func (a *cpuAccumulator) iterateCombinations(n []int, k int, f func([]int) LoopC
 // or the remaining number of CPUs to take after having taken full sockets and NUMA nodes is less
 // than a whole NUMA node, the function tries to take whole physical cores (cores).
 //
+// If `PreferAlignByUncoreCache` is enabled, the function tries to optimally assign UncoreCaches.
+// If `numCPUs` is larger than or equal to the total number of CPUs in an UncoreCache, and there are
+// free (i.e. all CPUs within the UncoreCache are free) UncoreCaches, the function takes as many entire
+// cores from free UncoreCaches as possible. Once `numCPUs` is smaller than the total number of
+// CPUs in a free UncoreCache, the function scans each UncoreCache index in numerical order to assign
+// cores that will fit within the UncoreCache. If `numCPUs` cannot fit within any UncoreCache, the
+// function tries to take whole physical cores.
+//
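+// For example (hypothetical sizes): with 8-CPU UncoreCaches, a request for
+// 20 CPUs takes two full UncoreCaches and then fits the remaining 4 CPUs
+// within a single UncoreCache, while a request for 6 CPUs skips the
+// full-cache step and is placed inside the first UncoreCache with enough
+// free cores.
+//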
 // If `numCPUs` is bigger than the total number of CPUs in a core, and there are
 // free (i.e. all CPUs in them are free) cores, the function takes as many entire free cores as possible.
 // The cores are taken from one socket at a time, and the sockets are considered by
@@ -658,7 +750,7 @@ func (a *cpuAccumulator) iterateCombinations(n []int, k int, f func([]int) LoopC
 // the least amount of free CPUs to the one with the highest amount of free CPUs (i.e. in ascending
 // order of free CPUs). For any NUMA node, the cores are selected from the ones in the socket with
 // the least amount of free CPUs to the one with the highest amount of free CPUs.
-func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy) (cpuset.CPUSet, error) {
+func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy, preferAlignByUncoreCache bool) (cpuset.CPUSet, error) {
 	acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy)
 	if acc.isSatisfied() {
 		return acc.result, nil
@@ -681,7 +773,17 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
 		return acc.result, nil
 	}
 
-	// 2. Acquire whole cores, if available and the container requires at least
+	// 2. If PreferAlignByUncoreCache is enabled, acquire whole UncoreCaches
+	// if available and the container requires at least an UncoreCache's worth
+	// of CPUs. Otherwise, acquire CPUs from the fewest UncoreCaches possible.
+	if preferAlignByUncoreCache {
+		acc.takeUncoreCache()
+		if acc.isSatisfied() {
+			return acc.result, nil
+		}
+	}
+
+	// 3. Acquire whole cores, if available and the container requires at least
 	// a core's-worth of CPUs.
 	// If `CPUSortingStrategySpread` is specified, skip taking the whole core.
 	if cpuSortingStrategy != CPUSortingStrategySpread {
@@ -691,7 +793,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
 		}
 	}
 
-	// 3. Acquire single threads, preferring to fill partially-allocated cores
+	// 4. Acquire single threads, preferring to fill partially-allocated cores
 	// on the same sockets as the whole cores we have already taken in this
 	// allocation.
 	acc.takeRemainingCPUs()
@@ -769,8 +871,10 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 	// If the number of CPUs requested cannot be handed out in chunks of
 	// 'cpuGroupSize', then we just call out the packing algorithm since we
 	// can't distribute CPUs in this chunk size.
+	// The PreferAlignByUncoreCache feature is not implemented here yet, so
+	// preferAlignByUncoreCache is passed as false. Support is planned for the beta release.
 	if (numCPUs % cpuGroupSize) != 0 {
-		return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy)
+		return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
 	}
 
 	// Otherwise build an accumulator to start allocating CPUs from.
@@ -953,7 +1057,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 	// size 'cpuGroupSize' from 'bestCombo'.
 	distribution := (numCPUs / len(bestCombo) / cpuGroupSize) * cpuGroupSize
 	for _, numa := range bestCombo {
-		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy)
+		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy, false)
 		acc.take(cpus)
 	}
 
@@ -968,7 +1072,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 		if acc.details.CPUsInNUMANodes(numa).Size() < cpuGroupSize {
 			continue
 		}
-		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy)
+		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy, false)
 		acc.take(cpus)
 		remainder -= cpuGroupSize
 	}
@@ -992,5 +1096,5 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 
 	// If we never found a combination of NUMA nodes that we could properly
 	// distribute CPUs across, fall back to the packing algorithm.
-	return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy)
+	return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
 }