diff --git a/CHANGELOG.md b/CHANGELOG.md
index 15289cd44a2..072f1d7b55d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,7 +27,8 @@ ### Improvements
-- Graphite Scaler: use the latest datapoint returned, not the earliest (https://github.com/kedacore/keda/pull/2365)
+- Graphite Scaler: use the latest datapoint returned, not the earliest ([#2365](https://github.com/kedacore/keda/pull/2365))
+- Kubernetes Workload Scaler: ignore terminated pods ([#2384](https://github.com/kedacore/keda/pull/2384))
 - TODO ([#XXX](https://github.com/kedacore/keda/pull/XXX))
diff --git a/pkg/scalers/kubernetes_workload_scaler.go b/pkg/scalers/kubernetes_workload_scaler.go
index 153683ee32e..b7185b96ead 100644
--- a/pkg/scalers/kubernetes_workload_scaler.go
+++ b/pkg/scalers/kubernetes_workload_scaler.go
@@ -27,6 +27,11 @@ const (
 	valueKey = "value"
 )
 
+var phasesCountedAsTerminated = []corev1.PodPhase{
+	corev1.PodSucceeded,
+	corev1.PodFailed,
+}
+
 type kubernetesWorkloadMetadata struct {
 	podSelector labels.Selector
 	namespace   string
@@ -125,5 +130,19 @@ func (s *kubernetesWorkloadScaler) getMetricValue(ctx context.Context) (int, err
 		return 0, err
 	}
 
-	return len(podList.Items), nil
+	count := 0
+	for _, pod := range podList.Items {
+		count += getCountValue(pod)
+	}
+
+	return count, nil
+}
+
+func getCountValue(pod corev1.Pod) int {
+	for _, ignore := range phasesCountedAsTerminated {
+		if pod.Status.Phase == ignore {
+			return 0
+		}
+	}
+	return 1
 }
diff --git a/pkg/scalers/kubernetes_workload_scaler_test.go b/pkg/scalers/kubernetes_workload_scaler_test.go
index e59361489f8..c99feb21628 100644
--- a/pkg/scalers/kubernetes_workload_scaler_test.go
+++ b/pkg/scalers/kubernetes_workload_scaler_test.go
@@ -3,6 +3,7 @@ package scalers
 import (
 	"context"
 	"fmt"
+	"strings"
 	"testing"
 	"time"
 
@@ -138,3 +139,58 @@
 	}
 	return list
 }
+
+func TestWorkloadPhase(t *testing.T) {
+	phases := map[v1.PodPhase]bool{
+		v1.PodRunning: true,
+		// succeeded and failed clearly count as terminated
+		v1.PodSucceeded: false,
+		v1.PodFailed:    false,
+		// unknown could be for example a temporarily unresponsive node; count the pod
+		v1.PodUnknown: true,
+		// count pre-Running to avoid an additional delay on top of the poll interval
+		v1.PodPending: true,
+	}
+	for phase, active := range phases {
+		list := &v1.PodList{}
+		pod := &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:        strings.ToLower(fmt.Sprintf("phase-%s", phase)),
+				Namespace:   "default",
+				Annotations: map[string]string{},
+				Labels: map[string]string{
+					"app": "testphases",
+				},
+			},
+			Status: v1.PodStatus{
+				Phase: phase,
+			},
+		}
+		list.Items = append(list.Items, *pod)
+		s, err := NewKubernetesWorkloadScaler(
+			fake.NewClientBuilder().WithRuntimeObjects(list).Build(),
+			&ScalerConfig{
+				TriggerMetadata: map[string]string{
+					"podSelector": "app=testphases",
+					"value":       "1",
+				},
+				AuthParams:        map[string]string{},
+				GlobalHTTPTimeout: 1000 * time.Millisecond,
+				Namespace:         "default",
+			},
+		)
+		if err != nil {
+			t.Errorf("Failed to create test scaler -- %v", err)
+		}
+		isActive, err := s.IsActive(context.TODO())
+		if err != nil {
+			t.Errorf("Failed to count active -- %v", err)
+		}
+		if active && !isActive {
+			t.Errorf("Expected active for phase %s but got inactive", phase)
+		}
+		if !active && isActive {
+			t.Errorf("Expected inactive for phase %s but got active", phase)
+		}
+	}
+}
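
For quick reference when reviewing, here is a minimal, self-contained sketch of the counting rule this patch introduces: a pod contributes 1 to the workload count unless its phase is `Succeeded` or `Failed`. This is not part of the patch itself; `countValue` and the `main` driver are illustrative names, and only `k8s.io/api/core/v1` is assumed.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Mirrors phasesCountedAsTerminated from the patch: only Succeeded and
// Failed pods are treated as terminated.
var phasesCountedAsTerminated = []corev1.PodPhase{
	corev1.PodSucceeded,
	corev1.PodFailed,
}

// countValue (illustrative name) returns 0 for a terminated pod and 1
// otherwise, matching the patch's getCountValue.
func countValue(pod corev1.Pod) int {
	for _, terminated := range phasesCountedAsTerminated {
		if pod.Status.Phase == terminated {
			return 0
		}
	}
	return 1
}

func main() {
	pods := []corev1.Pod{
		{Status: corev1.PodStatus{Phase: corev1.PodRunning}},   // counted
		{Status: corev1.PodStatus{Phase: corev1.PodSucceeded}}, // terminated: not counted
		{Status: corev1.PodStatus{Phase: corev1.PodPending}},   // pre-Running: counted
	}
	count := 0
	for _, pod := range pods {
		count += countValue(pod)
	}
	fmt.Println(count) // 2
}
```

Counting `Pending` and `Unknown` pods as active is deliberate, per the test table in the patch: `Pending` avoids an extra scaling delay on top of the poll interval, and `Unknown` may simply be a temporarily unresponsive node.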