@@ -2588,6 +2588,8 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
 
+	cpu1m := resource.MustParse("1m")
+	cpu2m := resource.MustParse("2m")
 	cpu500m := resource.MustParse("500m")
 	cpu1000m := resource.MustParse("1")
 	cpu1500m := resource.MustParse("1500m")
@@ -2671,7 +2673,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 
 	tests := []struct {
 		name                 string
-		pod                  *v1.Pod
+		originalRequests     v1.ResourceList
 		newRequests          v1.ResourceList
 		newRequestsAllocated bool // Whether the new requests have already been allocated (but not actuated)
 		expectedAllocations  v1.ResourceList
@@ -2681,79 +2683,113 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	}{
 		{
 			name:                "Request CPU and memory decrease - expect InProgress",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
 			expectBackoffReset:  true,
 		},
 		{
 			name:                "Request CPU increase, memory decrease - expect InProgress",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
 			expectBackoffReset:  true,
 		},
 		{
 			name:                "Request CPU decrease, memory increase - expect InProgress",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
 			expectedResize:      v1.PodResizeStatusInProgress,
 			expectBackoffReset:  true,
 		},
 		{
 			name:                "Request CPU and memory increase beyond current capacity - expect Deferred",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu2500m, v1.ResourceMemory: mem2500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusDeferred,
 		},
 		{
 			name:                "Request CPU decrease and memory increase beyond current capacity - expect Deferred",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem2500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusDeferred,
 		},
 		{
 			name:                "Request memory increase beyond node capacity - expect Infeasible",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem4500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusInfeasible,
 		},
 		{
 			name:                "Request CPU increase beyond node capacity - expect Infeasible",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu5000m, v1.ResourceMemory: mem1000M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusInfeasible,
 		},
 		{
 			name:                 "CPU increase in progress - expect InProgress",
-			pod:                  testPod2,
+			originalRequests:     v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:          v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem1000M},
 			newRequestsAllocated: true,
 			expectedAllocations:  v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem1000M},
 			expectedResize:       v1.PodResizeStatusInProgress,
 		},
 		{
 			name:                "No resize",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      "",
 		},
 		{
 			name:                "windows node, expect Infeasible",
-			pod:                 testPod2,
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
 			expectedResize:      v1.PodResizeStatusInfeasible,
 			goos:                "windows",
 		},
+		{
+			name:                "Increase CPU from min shares",
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu2m},
+			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1000m},
+			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m},
+			expectedResize:      v1.PodResizeStatusInProgress,
+			expectBackoffReset:  true,
+		},
+		{
+			name:                "Decrease CPU to min shares",
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1000m},
+			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedResize:      v1.PodResizeStatusInProgress,
+			expectBackoffReset:  true,
+		},
+		{
+			name:                "Equivalent min CPU shares",
+			originalRequests:    v1.ResourceList{v1.ResourceCPU: cpu1m},
+			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu2m},
+			expectedResize:      "",
+			// Even though the resize isn't being actuated, we still clear the container backoff
+			// since the allocation is changing.
+			expectBackoffReset: true,
+		},
+		{
+			name:                 "Equivalent min CPU shares - already allocated",
+			originalRequests:     v1.ResourceList{v1.ResourceCPU: cpu2m},
+			newRequests:          v1.ResourceList{v1.ResourceCPU: cpu1m},
+			newRequestsAllocated: true,
+			expectedAllocations:  v1.ResourceList{v1.ResourceCPU: cpu1m},
+			expectedResize:       "",
+		},
 	}
 
 	for _, tt := range tests {
@@ -2765,22 +2801,26 @@ func TestHandlePodResourcesResize(t *testing.T) {
 			}
 			kubelet.statusManager = status.NewFakeManager()
 
-			newPod := tt.pod.DeepCopy()
+			originalPod := testPod1.DeepCopy()
+			originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests
+			kubelet.podManager.UpdatePod(originalPod)
+
+			newPod := originalPod.DeepCopy()
 			newPod.Spec.Containers[0].Resources.Requests = tt.newRequests
 
 			if !tt.newRequestsAllocated {
-				require.NoError(t, kubelet.statusManager.SetPodAllocation(tt.pod))
+				require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod))
 			} else {
 				require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod))
 			}
 
 			podStatus := &kubecontainer.PodStatus{
-				ID:                tt.pod.UID,
-				Name:              tt.pod.Name,
-				Namespace:         tt.pod.Namespace,
-				ContainerStatuses: make([]*kubecontainer.Status, len(tt.pod.Spec.Containers)),
+				ID:                originalPod.UID,
+				Name:              originalPod.Name,
+				Namespace:         originalPod.Namespace,
+				ContainerStatuses: make([]*kubecontainer.Status, len(originalPod.Spec.Containers)),
 			}
-			for i, c := range tt.pod.Spec.Containers {
+			for i, c := range originalPod.Spec.Containers {
 				podStatus.ContainerStatuses[i] = &kubecontainer.Status{
 					Name:  c.Name,
 					State: kubecontainer.ContainerStateRunning,
@@ -2794,7 +2834,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
 
 			now := kubelet.clock.Now()
 			// Put the container in backoff so we can confirm backoff is reset.
-			backoffKey := kuberuntime.GetStableKey(tt.pod, &tt.pod.Spec.Containers[0])
+			backoffKey := kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0])
 			kubelet.backOff.Next(backoffKey, now)
 
 			updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus)
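
For context on the four new min-shares cases: CPU requests are actuated as cgroup CPU shares, and that conversion clamps at a kernel-imposed floor, so 1m and 2m both land on the same share value; the resize changes the allocation but is a no-op at the runtime level, which is why those cases expect an empty resize status. Below is a minimal sketch of that equivalence. It assumes the kubelet's usual constants (1024 shares per full CPU, minimum of 2) and only mirrors the MilliCPUToShares helper in pkg/kubelet/cm; it is illustrative, not the actual implementation.

package main

import "fmt"

const (
	sharesPerCPU  = 1024 // cgroup CPU shares granted per full CPU of request
	minShares     = 2    // kernel-imposed floor for cpu.shares
	milliPerCPU   = 1000 // milliCPU units in one CPU
)

// milliCPUToShares converts a CPU request in milliCPU to cgroup CPU shares,
// clamping at minShares (a sketch of the kubelet's conversion, under the
// assumptions stated above).
func milliCPUToShares(milliCPU int64) uint64 {
	if milliCPU == 0 {
		return minShares
	}
	shares := (milliCPU * sharesPerCPU) / milliPerCPU
	if shares < minShares {
		return minShares
	}
	return uint64(shares)
}

func main() {
	// 1m and 2m both clamp to 2 shares, so resizing between them is
	// "equivalent" from the container runtime's point of view.
	fmt.Println(milliCPUToShares(1))    // 2
	fmt.Println(milliCPUToShares(2))    // 2
	fmt.Println(milliCPUToShares(1000)) // 1024
}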