Skip to content

Commit

Permalink
chore: e2e test improvements (#5226)
Browse files Browse the repository at this point in the history
  • Loading branch information
JorTurFer authored Dec 3, 2023
1 parent e8fcb84 commit 54b05f3
Show file tree
Hide file tree
Showing 52 changed files with 264 additions and 186 deletions.
2 changes: 1 addition & 1 deletion config/e2e/patch_operator.yml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
path: /spec/template/spec/containers/0/env/-
value:
name: OTEL_EXPORTER_OTLP_ENDPOINT
value: "http://opentelemetry-collector.default.svc.cluster.local:4318"
value: "http://opentelemetry-collector.open-telemetry-system.svc.cluster.local:4318"

- op: add
path: /spec/template/spec/containers/0/env/-
Expand Down
2 changes: 1 addition & 1 deletion tests/clean-crds.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
echo "Cleaning up CRDs before undeploying KEDA"
while read -r namespace
do
resources=$(kubectl get so,sj,ta,cta -n $namespace -o name)
resources=$(kubectl get so,sj,ta,cta,cloudeventsource -n $namespace -o name)
if [[ -n "$resources" ]]
then
kubectl delete $resources -n $namespace
Expand Down
70 changes: 60 additions & 10 deletions tests/helper/helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ func ExecCommandOnSpecificPod(t *testing.T, podName string, namespace string, co
}
buf := &bytes.Buffer{}
errBuf := &bytes.Buffer{}
request := KubeClient.CoreV1().RESTClient().Post().
request := GetKubernetesClient(t).CoreV1().RESTClient().Post().
Resource("pods").Name(podName).Namespace(namespace).
SubResource("exec").Timeout(time.Second*20).
VersionedParams(&corev1.PodExecOptions{
Expand Down Expand Up @@ -243,7 +243,7 @@ func CreateNamespace(t *testing.T, kc *kubernetes.Clientset, nsName string) {
func DeleteNamespace(t *testing.T, nsName string) {
t.Logf("deleting namespace %s", nsName)
period := int64(0)
err := KubeClient.CoreV1().Namespaces().Delete(context.Background(), nsName, metav1.DeleteOptions{
err := GetKubernetesClient(t).CoreV1().Namespaces().Delete(context.Background(), nsName, metav1.DeleteOptions{
GracePeriodSeconds: &period,
})
if errors.IsNotFound(err) {
Expand Down Expand Up @@ -295,7 +295,7 @@ func WaitForAllJobsSuccess(t *testing.T, kc *kubernetes.Clientset, namespace str
func WaitForNamespaceDeletion(t *testing.T, nsName string) bool {
for i := 0; i < 120; i++ {
t.Logf("waiting for namespace %s deletion", nsName)
_, err := KubeClient.CoreV1().Namespaces().Get(context.Background(), nsName, metav1.GetOptions{})
_, err := GetKubernetesClient(t).CoreV1().Namespaces().Get(context.Background(), nsName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true
}
Expand Down Expand Up @@ -563,6 +563,27 @@ func KubectlApplyMultipleWithTemplate(t *testing.T, data interface{}, templates
}
}

// KubectlReplaceWithTemplate renders the given Go template with data, writes the
// result to a temporary manifest file, and force-replaces the resource via
// `kubectl replace --force`. Unlike KubectlApplyWithTemplate this recreates the
// resource, which is needed when the change cannot be applied as a patch.
// Assertion failures are reported through t; the temp file is always removed.
func KubectlReplaceWithTemplate(t *testing.T, data interface{}, templateName string, config string) {
	t.Logf("Replacing template: %s", templateName)

	tmpl, err := template.New("kubernetes resource template").Parse(config)
	assert.NoErrorf(t, err, "cannot parse template - %s", err)

	tempFile, err := os.CreateTemp("", templateName)
	assert.NoErrorf(t, err, "cannot create temp file - %s", err)

	// Remove the rendered manifest once kubectl has consumed it.
	defer os.Remove(tempFile.Name())

	err = tmpl.Execute(tempFile, data)
	assert.NoErrorf(t, err, "cannot insert data into template - %s", err)

	// Close (and thereby flush) the file before handing its path to an
	// external process, so kubectl is guaranteed to see the full contents.
	err = tempFile.Close()
	assert.NoErrorf(t, err, "cannot close temp file - %s", err)

	_, err = ExecuteCommand(fmt.Sprintf("kubectl replace -f %s --force", tempFile.Name()))
	assert.NoErrorf(t, err, "cannot replace file - %s", err)
}

func KubectlDeleteWithTemplate(t *testing.T, data interface{}, templateName, config string) {
t.Logf("Deleting template: %s", templateName)

Expand Down Expand Up @@ -613,21 +634,22 @@ func RemoveANSI(input string) string {
return reg.ReplaceAllString(input, "")
}

func FindPodLogs(kc *kubernetes.Clientset, namespace, label string) ([]string, error) {
var podLogs []string
func FindPodLogs(kc *kubernetes.Clientset, namespace, label string, includePrevious bool) ([]string, error) {
pods, err := kc.CoreV1().Pods(namespace).List(context.TODO(),
metav1.ListOptions{LabelSelector: label})
if err != nil {
return []string{}, err
}
var podLogRequest *rest.Request
for _, v := range pods.Items {
podLogRequest = kc.CoreV1().Pods(namespace).GetLogs(v.Name, &corev1.PodLogOptions{})
getPodLogs := func(pod *corev1.Pod, previous bool) ([]string, error) {
podLogRequest := kc.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
Previous: previous,
})
stream, err := podLogRequest.Stream(context.TODO())
if err != nil {
return []string{}, err
}
defer stream.Close()
logs := []string{}
for {
buf := make([]byte, 2000)
numBytes, err := stream.Read(buf)
Expand All @@ -640,10 +662,38 @@ func FindPodLogs(kc *kubernetes.Clientset, namespace, label string) ([]string, e
if err != nil {
return []string{}, err
}
podLogs = append(podLogs, string(buf[:numBytes]))
logs = append(logs, string(buf[:numBytes]))
}
return logs, nil
}

var outputLogs []string
for _, pod := range pods.Items {
getPrevious := false
if includePrevious {
for _, container := range pod.Status.ContainerStatuses {
if container.RestartCount > 0 {
getPrevious = true
}
}
}

if getPrevious {
podLogs, err := getPodLogs(&pod, true)
if err != nil {
return []string{}, err
}
outputLogs = append(outputLogs, podLogs...)
outputLogs = append(outputLogs, "=====================RESTART=====================\n")
}

podLogs, err := getPodLogs(&pod, false)
if err != nil {
return []string{}, err
}
outputLogs = append(outputLogs, podLogs...)
}
return podLogs, nil
return outputLogs, nil
}

// Delete all pods in namespace by selector
Expand Down
45 changes: 35 additions & 10 deletions tests/internals/cloudevent_source/cloudevent_source_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"testing"
"time"

cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
"k8s.io/client-go/kubernetes"
Expand All @@ -31,6 +32,9 @@ var (
cloudEventHTTPReceiverName = fmt.Sprintf("%s-cloudevent-http-receiver", testName)
cloudEventHTTPServiceName = fmt.Sprintf("%s-cloudevent-http-service", testName)
cloudEventHTTPServiceURL = fmt.Sprintf("http://%s.%s.svc.cluster.local:8899", cloudEventHTTPServiceName, namespace)
clusterName = "test-cluster"
expectedSubject = fmt.Sprintf("/%s/%s/workload/%s", clusterName, namespace, scaledObjectName)
expectedSource = fmt.Sprintf("/%s/%s/keda", clusterName, namespace)
)

type templateData struct {
Expand All @@ -41,6 +45,7 @@ type templateData struct {
CloudEventHTTPReceiverName string
CloudEventHTTPServiceName string
CloudEventHTTPServiceURL string
ClusterName string
}

const (
Expand All @@ -51,7 +56,7 @@ const (
name: {{.CloudEventSourceName}}
namespace: {{.TestNamespace}}
spec:
clusterName: cluster-sample
clusterName: {{.ClusterName}}
destination:
http:
uri: {{.CloudEventHTTPServiceURL}}
Expand Down Expand Up @@ -157,16 +162,35 @@ func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data tem
t.Log("--- test emitting eventsource about scaledobject err---")
KubectlApplyWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate)

// recreate database to clear it
out, _, _ := ExecCommandOnSpecificPod(t, clientName, namespace, fmt.Sprintf("curl -X GET %s/getCloudEvent/%s", cloudEventHTTPServiceURL, "ScaledObjectCheckFailed"))

assert.NotNil(t, out)

cloudEvent := make(map[string]interface{})
err := json.Unmarshal([]byte(out), &cloudEvent)
// wait 15 seconds to ensure event propagation
time.Sleep(15 * time.Second)

assert.Nil(t, err)
assert.Equal(t, cloudEvent["data"].(map[string]interface{})["message"], "ScaledObject doesn't have correct scaleTargetRef specification")
out, outErr, err := ExecCommandOnSpecificPod(t, clientName, namespace, fmt.Sprintf("curl -X GET %s/getCloudEvent/%s", cloudEventHTTPServiceURL, "ScaledObjectCheckFailed"))
assert.NotEmpty(t, out)
assert.Empty(t, outErr)
assert.NoError(t, err, "dont expect error requesting ")

cloudEvents := []cloudevents.Event{}
err = json.Unmarshal([]byte(out), &cloudEvents)

assert.NoError(t, err, "dont expect error unmarshaling the cloudEvents")
assert.Greater(t, len(cloudEvents), 0, "cloudEvents should have at least 1 item")

foundEvents := []cloudevents.Event{}

for _, cloudEvent := range cloudEvents {
if cloudEvent.Subject() == expectedSubject {
foundEvents = append(foundEvents, cloudEvent)
data := map[string]string{}
err := cloudEvent.DataAs(&data)
assert.NoError(t, err)
assert.Equal(t, data["message"], "ScaledObject doesn't have correct scaleTargetRef specification")
assert.Equal(t, cloudEvent.Type(), "com.cloudeventsource.keda")
assert.Equal(t, cloudEvent.Source(), expectedSource)
assert.Equal(t, cloudEvent.DataContentType(), "application/json")
}
}
assert.NotEmpty(t, foundEvents)
}

// help function to load template data
Expand All @@ -179,6 +203,7 @@ func getTemplateData() (templateData, []Template) {
CloudEventHTTPReceiverName: cloudEventHTTPReceiverName,
CloudEventHTTPServiceName: cloudEventHTTPServiceName,
CloudEventHTTPServiceURL: cloudEventHTTPServiceURL,
ClusterName: clusterName,
}, []Template{
{Name: "cloudEventHTTPReceiverTemplate", Config: cloudEventHTTPReceiverTemplate},
{Name: "cloudEventHTTPServiceTemplate", Config: cloudEventHTTPServiceTemplate},
Expand Down
4 changes: 2 additions & 2 deletions tests/internals/fallback/fallback_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,7 @@ func TestFallback(t *testing.T) {
func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale out ---")
data.MetricValue = 50
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, maxReplicas, 60, 3),
"replica count should be %d after 3 minutes", maxReplicas)
Expand All @@ -269,7 +269,7 @@ func testRestoreAfterFallback(t *testing.T, kc *kubernetes.Clientset, data templ
t.Log("--- testing after fallback ---")
KubectlApplyWithTemplate(t, data, "metricsServerDeploymentTemplate", metricsServerDeploymentTemplate)
data.MetricValue = 50
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, maxReplicas, 60, 3),
"replica count should be %d after 3 minutes", maxReplicas)
Expand Down
4 changes: 2 additions & 2 deletions tests/internals/global_custom_ca/global_custom_ca_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ func TestCustomCa(t *testing.T) {
func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale out ---")
data.MetricValue = 50
KubectlApplyWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
"replica count should be %d after 3 minutes", maxReplicaCount)
Expand All @@ -249,7 +249,7 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale in ---")
data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
"replica count should be %d after 3 minutes", minReplicaCount)
Expand Down
12 changes: 6 additions & 6 deletions tests/internals/polling_cooldown_so/polling_cooldown_so_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ func testPollingIntervalUp(t *testing.T, kc *kubernetes.Clientset, data template
t.Log("--- test Polling Interval up ---")

data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

// wait some seconds to finish the job
WaitForJobCount(t, kc, namespace, 0, 15, 2)
Expand All @@ -232,7 +232,7 @@ func testPollingIntervalUp(t *testing.T, kc *kubernetes.Clientset, data template
assert.NoError(t, err)

data.MetricValue = maxReplicas
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, minReplicas, 60)

Expand All @@ -246,7 +246,7 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa
t.Log("--- test Polling Interval down ---")

data.MetricValue = 1
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

// wait some seconds to finish the job
WaitForJobCount(t, kc, namespace, 0, 15, 2)
Expand All @@ -264,7 +264,7 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa
assert.NoError(t, err)

data.MetricValue = minReplicas
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, maxReplicas, 60)

Expand All @@ -282,7 +282,7 @@ func testCooldownPeriod(t *testing.T, kc *kubernetes.Clientset, data templateDat
KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)

data.MetricValue = 1
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

// wait some seconds to finish the job
WaitForJobCount(t, kc, namespace, 0, 15, 2)
Expand All @@ -299,7 +299,7 @@ func testCooldownPeriod(t *testing.T, kc *kubernetes.Clientset, data templateDat
assert.NoError(t, err)

data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, maxReplicas, 60)

Expand Down
8 changes: 4 additions & 4 deletions tests/internals/replica_update_so/replica_update_so_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ func TestScaler(t *testing.T) {
func scaleMaxReplicasUp(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- scale up after MaxReplicas change ---")
data.MetricValue = 100
KubectlApplyWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)

KubectlApplyWithTemplate(t, data, "scaledObjectTriggerTemplate", scaledObjectTriggerTemplate)

Expand All @@ -234,7 +234,7 @@ func scaleMaxReplicasUp(t *testing.T, kc *kubernetes.Clientset, data templateDat
func scaleMaxReplicasDown(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- scale max replicas down ---")
data.MetricValue = 100
KubectlApplyWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)

updatedMaxReplicas := maxReplicas + 10
data.MaxReplicas = strconv.Itoa(updatedMaxReplicas)
Expand All @@ -254,7 +254,7 @@ func scaleMaxReplicasDown(t *testing.T, kc *kubernetes.Clientset, data templateD
func scaleMinReplicasUpFromZero(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- scale min replicas up from zero ---")
data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)

KubectlApplyWithTemplate(t, data, "scaledObjectTriggerTemplate", scaledObjectTriggerTemplate)

Expand All @@ -273,7 +273,7 @@ func scaleMinReplicasUpFromZero(t *testing.T, kc *kubernetes.Clientset, data tem
func scaleMinReplicasDownToZero(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- scale min replicas down to zero ---")
data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate)

// set minReplicas to higher number at first
updatedMinReplicas := minReplicas + 5
Expand Down
10 changes: 5 additions & 5 deletions tests/internals/scaling_modifiers/scaling_modifiers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ spec:
maxReplicaCount: 10
fallback:
replicas: 5
failureThreshold: 1
failureThreshold: 3
triggers:
- type: metrics-api
name: metrics_api
Expand Down Expand Up @@ -241,12 +241,12 @@ func testFormula(t *testing.T, kc *kubernetes.Clientset, data templateData) {
// formula simply adds 2 metrics together (0+2=2; activationTarget = 2 -> replicas should be 0)
KubectlApplyWithTemplate(t, data, "soFallbackTemplate", soFallbackTemplate)
data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, 0, 60)

// formula simply adds 2 metrics together (3+2=5; target = 2 -> 5/2 replicas should be 3)
data.MetricValue = 3
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

_, err := ExecuteCommand(fmt.Sprintf("kubectl scale deployment/depl-workload-base --replicas=2 -n %s", namespace))
assert.NoErrorf(t, err, "cannot scale workload deployment - %s", err)
Expand All @@ -256,7 +256,7 @@ func testFormula(t *testing.T, kc *kubernetes.Clientset, data templateData) {
assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 3, 12, 10),
"replica count should be %d after 2 minutes", 3)

// apply fallback fallback
// apply fallback
_, err = ExecuteCommand(fmt.Sprintf("kubectl scale deployment/%s --replicas=0 -n %s", metricsServerDeploymentName, namespace))
assert.NoErrorf(t, err, "cannot scale metricsServer deployment - %s", err)

Expand All @@ -268,7 +268,7 @@ func testFormula(t *testing.T, kc *kubernetes.Clientset, data templateData) {
assert.NoErrorf(t, err, "cannot scale metricsServer deployment - %s", err)

data.MetricValue = 2
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
// 2+2=4; target = 2 -> 4/2 replicas should be 2
assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, 2, 12, 10),
"replica count should be %d after 2 minutes", 2)
Expand Down
Loading

0 comments on commit 54b05f3

Please sign in to comment.