From 130bc9323dd952edfd75f7021d5c7829cc0924a9 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Fri, 11 Feb 2022 16:32:07 +0100 Subject: [PATCH] Improve e2e test reliability (#2580) Signed-off-by: jorturfer --- CHANGELOG.md | 2 +- config/crd/bases/keda.sh_scaledjobs.yaml | 621 ++++++++++++------ tests/package.json | 2 +- tests/run-all.sh | 40 +- tests/scalers/activemq.test.ts | 6 - tests/scalers/argo-rollouts.test.ts | 12 +- tests/scalers/artemis-helpers.ts | 3 - tests/scalers/azure-pipelines.test.ts | 59 +- ...re-queue-restore-original-replicas.test.ts | 48 +- .../scalers/azure-queue-trigger-auth.test.ts | 7 +- tests/scalers/azure-queue.test.ts | 68 +- tests/scalers/cassandra.test.ts | 28 +- tests/scalers/elasticsearch.test.ts | 5 - tests/scalers/graphite.test.ts | 4 +- tests/scalers/mongodb.test.ts | 1 - tests/scalers/mysql.test.ts | 4 +- tests/scalers/new-relic.test.ts | 36 +- tests/scalers/openstack-swift.test.ts | 4 +- tests/scalers/predictkube.test.ts | 6 +- tests/scalers/prometheus.test.ts | 4 +- tests/scalers/redis-cluster-lists.test.ts | 34 +- tests/scalers/redis-cluster-streams.test.ts | 8 +- tests/scalers/redis-lists.test.ts | 32 +- tests/scalers/redis-sentinel-lists.test.ts | 34 +- tests/scalers/redis-sentinel-streams.test.ts | 8 +- tests/scalers/redis-streams.test.ts | 10 +- tests/scalers/selenium-grid.test.ts | 107 +-- 27 files changed, 696 insertions(+), 497 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24f3586df98..fa37d120de4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,7 +34,7 @@ ### Improvements -- TODO ([#XXX](https://github.com/kedacore/keda/issue/XXX)) +- Improve e2e tests reliability ([#2580](https://github.com/kedacore/keda/issues/2580)) ### Breaking Changes diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml index 19edd6ca0d6..d9ef90afbf7 100644 --- a/config/crd/bases/keda.sh_scaledjobs.yaml +++ b/config/crd/bases/keda.sh_scaledjobs.yaml @@ -1404,9 +1404,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -1477,10 +1476,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -1505,21 +1506,19 @@ spec: API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container - crashes or exits. The reason for termination - is passed to the handler. The Pod''s termination - grace period countdown begins before the PreStop - hooked is executed. Regardless of the outcome + crashes or exits. The Pod''s termination grace + period countdown begins before the PreStop + hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace - period. 
Other management of the container - blocks until the hook completes or until the - termination grace period is reached. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + period (unless delayed by finalizers). Other + management of the container blocks until the + hook completes or until the termination grace + period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -1590,10 +1589,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -1619,9 +1620,7 @@ spec: Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -1645,6 +1644,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -1712,10 +1731,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -1823,9 +1840,7 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -1849,6 +1864,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -1916,10 +1951,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -2006,13 +2039,16 @@ spec: controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN' + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name + is windows.' type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set + when spec.os.name is windows. properties: add: description: Added capabilities @@ -2033,7 +2069,8 @@ spec: description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to - false. + false. Note that this field cannot be set + when spec.os.name is windows. type: boolean procMount: description: procMount denotes the type of proc @@ -2041,11 +2078,14 @@ spec: is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature - flag to be enabled. + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. type: string readOnlyRootFilesystem: description: Whether this container has a read-only - root filesystem. Default is false. + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of @@ -2053,7 +2093,8 @@ spec: if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -2075,6 +2116,8 @@ spec: be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -2084,7 +2127,8 @@ spec: for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -2107,7 +2151,9 @@ spec: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container - options override the pod options. + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. 
properties: localhostProfile: description: localhostProfile indicates @@ -2136,7 +2182,8 @@ spec: from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where @@ -2190,9 +2237,7 @@ spec: cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -2216,6 +2261,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -2283,10 +2348,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -2516,22 +2579,21 @@ spec: it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field - is alpha-level and is only honored by servers that enable - the EphemeralContainers feature. + is beta-level and available on clusters that haven't + disabled the EphemeralContainers feature gate. items: - description: An EphemeralContainer is a container that - may be added temporarily to an existing pod for user-initiated + description: "An EphemeralContainer is a temporary container + that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they - will not be restarted when they exit or when a pod - is removed or restarted. If an ephemeral container - causes a pod to exceed its resource allocation, the - pod may be evicted. Ephemeral containers may not be - added by directly updating the pod spec. They must - be added via the pod's ephemeralcontainers subresource, - and they will appear in the pod spec once added. This - is an alpha feature enabled by the EphemeralContainers - feature flag. + will not be restarted when they exit or when a Pod + is removed or restarted. The kubelet may evict a Pod + if an ephemeral container causes the Pod to exceed + its resource allocation. \n To add an ephemeral container, + use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + \n This is a beta feature available on clusters that + haven't disabled the EphemeralContainers feature gate." 
properties: args: description: 'Arguments to the entrypoint. The docker @@ -2757,9 +2819,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -2830,10 +2891,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -2858,21 +2921,19 @@ spec: API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container - crashes or exits. The reason for termination - is passed to the handler. The Pod''s termination - grace period countdown begins before the PreStop - hooked is executed. Regardless of the outcome + crashes or exits. The Pod''s termination grace + period countdown begins before the PreStop + hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace - period. Other management of the container - blocks until the hook completes or until the - termination grace period is reached. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + period (unless delayed by finalizers). Other + management of the container blocks until the + hook completes or until the termination grace + period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -2943,10 +3004,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -2971,9 +3034,7 @@ spec: containers. properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -2997,6 +3058,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -3064,10 +3145,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -3159,14 +3238,16 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: description: Probes are not allowed for ephemeral containers. properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -3190,6 +3271,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -3257,10 +3358,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -3348,13 +3447,16 @@ spec: controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN' + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name + is windows.' type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set + when spec.os.name is windows. properties: add: description: Added capabilities @@ -3375,7 +3477,8 @@ spec: description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to - false. + false. Note that this field cannot be set + when spec.os.name is windows. type: boolean procMount: description: procMount denotes the type of proc @@ -3383,11 +3486,14 @@ spec: is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature - flag to be enabled. + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. 
type: string readOnlyRootFilesystem: description: Whether this container has a read-only - root filesystem. Default is false. + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of @@ -3395,7 +3501,8 @@ spec: if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -3417,6 +3524,8 @@ spec: be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -3426,7 +3535,8 @@ spec: for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -3449,7 +3559,9 @@ spec: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container - options override the pod options. + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. properties: localhostProfile: description: localhostProfile indicates @@ -3478,7 +3590,8 @@ spec: from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where @@ -3524,9 +3637,7 @@ spec: containers. properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -3550,6 +3661,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -3617,10 +3748,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -3686,13 +3815,15 @@ spec: stdin will never receive an EOF. Default is false type: boolean targetContainerName: - description: If set, the name of the container from - PodSpec that this ephemeral container targets. 
+ description: "If set, the name of the container + from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set - then the ephemeral container is run in whatever - namespaces are shared for the pod. Note that the - container runtime must support this feature. + then the ephemeral container uses the namespaces + configured in the Pod spec. \n The container runtime + must implement support for this feature. If the + runtime does not support namespace targeting then + the result of setting this field is undefined." type: string terminationMessagePath: description: 'Optional: Path at which the file to @@ -3744,7 +3875,8 @@ spec: type: array volumeMounts: description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + filesystem. Subpath mounts are not allowed for + ephemeral containers. Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. @@ -4106,9 +4238,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -4179,10 +4310,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -4207,21 +4340,19 @@ spec: API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container - crashes or exits. The reason for termination - is passed to the handler. The Pod''s termination - grace period countdown begins before the PreStop - hooked is executed. Regardless of the outcome + crashes or exits. The Pod''s termination grace + period countdown begins before the PreStop + hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace - period. Other management of the container - blocks until the hook completes or until the - termination grace period is reached. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + period (unless delayed by finalizers). Other + management of the container blocks until the + hook completes or until the termination grace + period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -4292,10 +4423,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. 
TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -4321,9 +4454,7 @@ spec: Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -4347,6 +4478,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -4414,10 +4565,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -4525,9 +4674,7 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -4551,6 +4698,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -4618,10 +4785,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -4708,13 +4873,16 @@ spec: controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN' + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name + is windows.' 
type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set + when spec.os.name is windows. properties: add: description: Added capabilities @@ -4735,7 +4903,8 @@ spec: description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to - false. + false. Note that this field cannot be set + when spec.os.name is windows. type: boolean procMount: description: procMount denotes the type of proc @@ -4743,11 +4912,14 @@ spec: is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature - flag to be enabled. + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. type: string readOnlyRootFilesystem: description: Whether this container has a read-only - root filesystem. Default is false. + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of @@ -4755,7 +4927,8 @@ spec: if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -4777,6 +4950,8 @@ spec: be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -4786,7 +4961,8 @@ spec: for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -4809,7 +4985,9 @@ spec: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container - options override the pod options. + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. properties: localhostProfile: description: localhostProfile indicates @@ -4838,7 +5016,8 @@ spec: from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where @@ -4892,9 +5071,7 @@ spec: cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -4918,6 +5095,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. 
+ Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -4985,10 +5182,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -5173,6 +5368,38 @@ spec: that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' type: object x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the + pod. Some pod and container fields are restricted if + this is set. \n If the OS field is set to linux, the + following fields must be unset: -securityContext.windowsOptions + \n If the OS field is set to windows, following fields + must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls + - spec.shareProcessNamespace - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities - + spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup This + is an alpha field and requires the IdentifyPodOS feature" + properties: + name: + description: 'Name is the name of the operating system. + The currently supported values are linux and windows. + Additional value may be defined in future and can + be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values + and treat unrecognized values in this field as os: + null' + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -5273,7 +5500,8 @@ spec: created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership - and permissions of any volume." + and permissions of any volume. Note that this field + cannot be set when spec.os.name is windows." format: int64 type: integer fsGroupChangePolicy: @@ -5284,7 +5512,9 @@ spec: based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" - and "Always". If not specified, "Always" is used.' + and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name + is windows.' type: string runAsGroup: description: The GID to run the entrypoint of the @@ -5292,7 +5522,8 @@ spec: May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for - that container. + that container. Note that this field cannot be set + when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -5311,7 +5542,8 @@ spec: image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. + takes precedence for that container. Note that this + field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: @@ -5321,7 +5553,8 @@ spec: container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. + for that container. Note that this field cannot + be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that @@ -5342,7 +5575,8 @@ spec: type: object seccompProfile: description: The seccomp options to use by the containers - in this pod. + in this pod. Note that this field cannot be set + when spec.os.name is windows. properties: localhostProfile: description: localhostProfile indicates a profile @@ -5367,7 +5601,8 @@ spec: description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups - will be added to any container. + will be added to any container. Note that this field + cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -5376,6 +5611,8 @@ spec: description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name + is windows. items: description: Sysctl defines a kernel parameter to be set @@ -5397,6 +5634,8 @@ spec: a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where the GMSA @@ -5624,7 +5863,7 @@ spec: location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming - pod if and only if every possible node assigment + pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread @@ -6104,9 +6343,7 @@ spec: to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent - volumes at the same time. \n This is a beta feature - and only available when the GenericEphemeralVolume - feature gate is enabled." + volumes at the same time." properties: volumeClaimTemplate: description: "Will be used to create a stand-alone @@ -6246,7 +6483,13 @@ spec: resources: description: 'Resources represents the minimum resources the volume should - have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + have. 
If RecoverVolumeExpansionFailure + feature is enabled users are allowed + to specify resource requirements that + are lower than previous value but + must still be higher than capacity + recorded in the status field of the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: limits: additionalProperties: @@ -7340,9 +7583,7 @@ spec: Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible - to be deleted immediately after it finishes. This field is alpha-level - and is only honored by servers that enable the TTLAfterFinished - feature. + to be deleted immediately after it finishes. format: int32 type: integer required: diff --git a/tests/package.json b/tests/package.json index 9bcf6b10f4f..b9adfd7fd8e 100644 --- a/tests/package.json +++ b/tests/package.json @@ -9,7 +9,7 @@ "require": [ "ts-node/register" ], - "timeout": "10m" + "timeout": "30m" }, "scripts": { "test": "ava" diff --git a/tests/run-all.sh b/tests/run-all.sh index d03d5d7e4ae..2403ca5783c 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=5 +concurrent_tests_limit=6 pids=() lookup=() failed_count=0 @@ -23,18 +23,52 @@ function run_tests { for test_case in $(find scalers -name "$E2E_REGEX" | shuf) do counter=$((counter+1)) - ./node_modules/.bin/ava $test_case > "${test_case}.log" 2>&1 & + ./node_modules/.bin/ava $test_case > "${test_case}.1.log" 2>&1 & pid=$! echo "Running $test_case with pid: $pid" pids+=($pid) lookup[$pid]=$test_case # limit concurrent runs - if [[ "$counter" -gt "$concurrent_tests_limit" ]]; then + if [[ "$counter" -ge "$concurrent_tests_limit" ]]; then wait_for_jobs counter=0 pids=() fi done + + wait_for_jobs + + # Retry failing tests + if [ ${#failed_lookup[@]} -ne 0 ]; then + + printf "\n\n##############################################\n" + printf "##############################################\n\n" + printf "FINISHED FIRST EXECUTION, RETRYING FAILING TESTS" + printf "\n\n##############################################\n" + printf "##############################################\n\n" + + retry_lookup=("${failed_lookup[@]}") + counter=0 + pids=() + failed_count=0 + failed_lookup=() + + for test_case in "${retry_lookup[@]}" + do + counter=$((counter+1)) + ./node_modules/.bin/ava $test_case > "${test_case}.2.log" 2>&1 & + pid=$! 
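+        # track the rerun under its new pid so wait_for_jobs/mark_failed can attribute a second failure to this test case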
+ echo "Rerunning $test_case with pid: $pid" + pids+=($pid) + lookup[$pid]=$test_case + # limit concurrent runs + if [[ "$counter" -ge "$concurrent_tests_limit" ]]; then + wait_for_jobs + counter=0 + pids=() + fi + done + fi } function mark_failed { diff --git a/tests/scalers/activemq.test.ts b/tests/scalers/activemq.test.ts index a0a4391ec1a..1adb3dc48b9 100644 --- a/tests/scalers/activemq.test.ts +++ b/tests/scalers/activemq.test.ts @@ -135,12 +135,6 @@ spec: name: mqtt protocol: TCP resources: - requests: - memory: 500Mi - cpu: 200m - limits: - memory: 1000Mi - cpu: 400m volumeMounts: - name: activemq-config mountPath: /opt/apache-activemq-5.16.3/webapps/api/WEB-INF/classes/jolokia-access.xml diff --git a/tests/scalers/argo-rollouts.test.ts b/tests/scalers/argo-rollouts.test.ts index 6cd8b9c6a8f..f42a8db2287 100644 --- a/tests/scalers/argo-rollouts.test.ts +++ b/tests/scalers/argo-rollouts.test.ts @@ -74,8 +74,8 @@ test.serial(`Rollouts should scale to 5 (the max) with HTTP Requests exceeding i // keda based rollout should start scaling up with http requests issued let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - t.log(`Waited ${5 * i} seconds for prometheus-based rollout to scale up`) + for (let i = 0; i < 120 && replicaCount !== '5'; i++) { + t.log(`Waited ${10 * i} seconds for prometheus-based rollout to scale up`) const jobLogs = sh.exec(`kubectl logs -l job-name=generate-requests -n ${testNamespace}`).stdout t.log(`Logs from the generate requests: ${jobLogs}`) @@ -83,22 +83,22 @@ test.serial(`Rollouts should scale to 5 (the max) with HTTP Requests exceeding i `kubectl get rollouts.argoproj.io/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - await sleep(5000) + await sleep(10000) } } t.is('5', replicaCount, 'Replica count should be maxed at 5') - for (let i = 0; i < 50 && replicaCount !== '0'; i++) { + for (let i = 0; i < 90 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get rollouts.argoproj.io/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - await sleep(5000) + await sleep(10000) } } - t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes') + t.is('0', replicaCount, 'Replica count should be 0 after 15 minutes') }) test.after.always.cb('clean up argo-rollouts testing deployment', t => { diff --git a/tests/scalers/artemis-helpers.ts b/tests/scalers/artemis-helpers.ts index e5235576632..51fe2f18433 100644 --- a/tests/scalers/artemis-helpers.ts +++ b/tests/scalers/artemis-helpers.ts @@ -159,9 +159,6 @@ spec: image: docker.io/vromero/activemq-artemis:2.6.2 imagePullPolicy: resources: - requests: - cpu: 100m - memory: 256Mi env: - name: ARTEMIS_PASSWORD valueFrom: diff --git a/tests/scalers/azure-pipelines.test.ts b/tests/scalers/azure-pipelines.test.ts index 0d5d09251f9..c876328745b 100644 --- a/tests/scalers/azure-pipelines.test.ts +++ b/tests/scalers/azure-pipelines.test.ts @@ -15,6 +15,8 @@ const projectName = process.env['AZURE_DEVOPS_PROJECT'] const buildDefinitionID = process.env['AZURE_DEVOPS_BUILD_DEFINITON_ID'] const poolName = process.env['AZURE_DEVOPS_POOL_NAME'] +let poolID: number + test.before(async t => { if (!organizationURL || !personalAccessToken || !projectName || !buildDefinitionID || !poolName) { t.fail('AZURE_DEVOPS_ORGANIZATION_URL, AZURE_DEVOPS_PAT, AZURE_DEVOPS_PROJECT, AZURE_DEVOPS_BUILD_DEFINITON_ID and AZURE_DEVOPS_POOL_NAME environment variables are required for 
azure pipelines tests') @@ -25,7 +27,7 @@ test.before(async t => { let taskAgent: ta.ITaskAgentApiBase = await connection.getTaskAgentApi(); let agentPool: ti.TaskAgentPool[] = await taskAgent.getAgentPools(poolName) - let poolID: number = agentPool[0].id + poolID = agentPool[0].id if(!poolID) { t.fail("failed to convert poolName to poolID") @@ -40,38 +42,43 @@ test.before(async t => { .replace('{{AZP_URL}}', organizationURL)) sh.exec(`kubectl create namespace ${defaultNamespace}`) t.is(0, sh.exec(`kubectl apply -f ${deployFile.name} --namespace ${defaultNamespace}`).code, 'creating a deployment should work.') +}) + +test.serial('Deployment should have 1 replicas on start', async t => { + t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should start out as 1') +}) + + +test.serial('Deployment should have 0 replicas after scale', async t => { + // wait for the first agent to be registered in the agent pool + await sleep(20 * 1000) + const scaledObjectFile = tmp.fileSync() fs.writeFileSync(scaledObjectFile.name, poolIdScaledObject .replace('{{AZP_POOL_ID}}', poolID.toString())) t.is(0, sh.exec(`kubectl apply -f ${scaledObjectFile.name} --namespace ${defaultNamespace}`).code, 'creating ScaledObject with poolId should work.') -}) -test.serial('Deployment should have 1 replicas on start', async t => { - t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should start out as 1') + t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should be 0 if no pending jobs') }) -test.serial('PoolID: Deployment should scale to 3 replicas after queueing 3 jobs', async t => { + +test.serial('PoolID: Deployment should scale to 1 replica after queueing job', async t => { let authHandler = azdev.getPersonalAccessTokenHandler(personalAccessToken); let connection = new azdev.WebApi(organizationURL, authHandler); let build: ba.IBuildApi = await connection.getBuildApi(); var definitionID = parseInt(buildDefinitionID) - // wait for the first agent to be registered in the agent pool - await sleep(20 * 1000) + await build.queueBuild(null, projectName, null, null, null, definitionID) - for(let i = 0; i < 3; i++) { - await build.queueBuild(null, projectName, null, null, null, definitionID) - } - - t.true(await waitForDeploymentReplicaCount(3, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 3 after starting 3 jobs') + t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 1 after starting a job') }) -test.serial('PoolID: Deployment should scale to 1 replica after finishing 3 jobs', async t => { +test.serial('PoolID: Deployment should scale to 0 replicas after finishing job', async t => { // wait 10 minutes for the jobs to finish and scale down - t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 60, 10000), 'replica count should be 1 after finishing 3 jobs') + t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 0 after finishing') }) -test.serial('PoolName: Deployment should scale to 3 replicas after queueing 3 jobs', async t => { +test.serial('PoolName: Deployment should scale to 1 replica after queueing job', async t => { const poolNameScaledObjectFile = tmp.fileSync() fs.writeFileSync(poolNameScaledObjectFile.name, poolNameScaledObject 
.replace('{{AZP_POOL}}', poolName)) @@ -82,16 +89,14 @@ test.serial('PoolName: Deployment should scale to 3 replicas after queueing 3 jo let build: ba.IBuildApi = await connection.getBuildApi(); var definitionID = parseInt(buildDefinitionID) - for(let i = 0; i < 3; i++) { - await build.queueBuild(null, projectName, null, null, null, definitionID) - } + await build.queueBuild(null, projectName, null, null, null, definitionID) - t.true(await waitForDeploymentReplicaCount(3, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 3 after starting 3 jobs') + t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 1 after starting a job') }) -test.serial('PoolName: should scale to 1 replica after finishing 3 jobs', async t => { +test.serial('PoolName: should scale to 0 replicas after finishing job', async t => { // wait 10 minutes for the jobs to finish and scale down - t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 60, 10000), 'replica count should be 1 after finishing 3 jobs') + t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 0 after finishing') }) test.after.always('clean up azure-pipelines deployment', t => { @@ -157,9 +162,9 @@ metadata: spec: scaleTargetRef: name: test-deployment - minReplicaCount: 1 - maxReplicaCount: 3 - pollingInterval: 50 + minReplicaCount: 0 + maxReplicaCount: 1 + pollingInterval: 30 cooldownPeriod: 60 advanced: horizontalPodAutoscalerConfig: @@ -179,9 +184,9 @@ metadata: spec: scaleTargetRef: name: test-deployment - minReplicaCount: 1 - maxReplicaCount: 3 - pollingInterval: 50 + minReplicaCount: 0 + maxReplicaCount: 1 + pollingInterval: 30 cooldownPeriod: 60 advanced: horizontalPodAutoscalerConfig: diff --git a/tests/scalers/azure-queue-restore-original-replicas.test.ts b/tests/scalers/azure-queue-restore-original-replicas.test.ts index e565ab44b79..1f93a84d0f0 100644 --- a/tests/scalers/azure-queue-restore-original-replicas.test.ts +++ b/tests/scalers/azure-queue-restore-original-replicas.test.ts @@ -1,9 +1,12 @@ +import * as azure from 'azure-storage' import * as fs from 'fs' import * as sh from 'shelljs' import * as tmp from 'tmp' import test from 'ava' +import {waitForDeploymentReplicaCount} from "./helpers"; const defaultNamespace = 'azure-queue-restore-original-replicas-test' +const queueName = 'queue-name-restore' const connectionString = process.env['TEST_STORAGE_CONNECTION_STRING'] test.before(t => { @@ -11,6 +14,10 @@ test.before(t => { t.fail('TEST_STORAGE_CONNECTION_STRING environment variable is required for queue tests') } + const queueSvc = azure.createQueueService(connectionString) + queueSvc.messageEncoder = new azure.QueueMessageEncoder.TextBase64QueueMessageEncoder() + queueSvc.createQueueIfNotExists(queueName, _ => {}) + sh.config.silent = true const base64ConStr = Buffer.from(connectionString).toString('base64') const tmpFile = tmp.fileSync() @@ -23,11 +30,8 @@ test.before(t => { ) }) -test.serial('Deployment should have 2 replicas on start', t => { - const replicaCount = sh.exec( - `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '2', 'replica count should start out as 2') +test.serial('Deployment should have 2 replicas on start', async t => { + t.true(await waitForDeploymentReplicaCount(2, 'test-deployment', defaultNamespace, 15, 1000), 'replica count should be 2 
after 15 seconds') }) test.serial('Creating ScaledObject should work', t => { @@ -44,18 +48,8 @@ test.serial('Creating ScaledObject should work', t => { test.serial( 'Deployment should scale to 0 and then shold be back to 2 after deletion of ScaledObject', - t => { - let replicaCount = '100' - for (let i = 0; i < 50 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - if (replicaCount !== '0') { - sh.exec('sleep 5s') - } - } - t.is('0', replicaCount, 'Replica count should be 0') - + async t => { + t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should be 0 after 2 minutes') t.is( 0, @@ -63,15 +57,7 @@ test.serial( 'deletion of ScaledObject should work.' ) - for (let i = 0; i < 50 && replicaCount !== '2'; i++) { - replicaCount = sh.exec( - `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - if (replicaCount !== '2') { - sh.exec('sleep 5s') - } - } - t.is('2', replicaCount, 'Replica count should be back at orignal 2') + t.true(await waitForDeploymentReplicaCount(2, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should be 2 after 2 minutes') } ) @@ -86,7 +72,13 @@ test.after.always.cb('clean up azure-queue deployment', t => { sh.exec(`kubectl delete ${resource} --namespace ${defaultNamespace}`) } sh.exec(`kubectl delete namespace ${defaultNamespace}`) - t.end() + + // delete test queue + const queueSvc = azure.createQueueService(connectionString) + queueSvc.deleteQueueIfExists(queueName, err => { + t.falsy(err, 'should delete test queue successfully') + t.end() + }) }) const deployYaml = `apiVersion: v1 @@ -145,5 +137,5 @@ spec: triggers: - type: azure-queue metadata: - queueName: queue-name + queueName: ${queueName} connectionFromEnv: AzureWebJobsStorage` diff --git a/tests/scalers/azure-queue-trigger-auth.test.ts b/tests/scalers/azure-queue-trigger-auth.test.ts index c341d2b867a..633688fafec 100644 --- a/tests/scalers/azure-queue-trigger-auth.test.ts +++ b/tests/scalers/azure-queue-trigger-auth.test.ts @@ -7,7 +7,7 @@ import test from 'ava' import {waitForDeploymentReplicaCount} from "./helpers"; const testNamespace = 'azure-queue-auth-test' -const queueName = 'queue-name' +const queueName = 'queue-name-trigger' const connectionString = process.env['TEST_STORAGE_CONNECTION_STRING'] test.before(async t => { @@ -44,7 +44,10 @@ test.serial( ) // Scaling out when messages available - t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', testNamespace, 60, 1000), 'replica count should be 3 after 1 minute') + t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', testNamespace, 60, 1000), 'replica count should be 1 after 1 minute') + + queueSvc.clearMessages(queueName, _ => {}) + // Scaling in when no available messages t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', testNamespace, 300, 1000), 'replica count should be 0 after 5 minute') } diff --git a/tests/scalers/azure-queue.test.ts b/tests/scalers/azure-queue.test.ts index 8d5e64765f7..a13b19f9069 100644 --- a/tests/scalers/azure-queue.test.ts +++ b/tests/scalers/azure-queue.test.ts @@ -4,15 +4,21 @@ import * as fs from 'fs' import * as sh from 'shelljs' import * as tmp from 'tmp' import test from 'ava' +import {waitForDeploymentReplicaCount} from "./helpers"; const defaultNamespace = 'azure-queue-test' const connectionString = 
+const queueName = 'queue-single-name'
 
-test.before(t => {
+test.before(async t => {
   if (!connectionString) {
     t.fail('TEST_STORAGE_CONNECTION_STRING environment variable is required for queue tests')
   }
 
+  const queueSvc = azure.createQueueService(connectionString)
+  queueSvc.messageEncoder = new azure.QueueMessageEncoder.TextBase64QueueMessageEncoder()
+  queueSvc.createQueueIfNotExists(queueName, _ => {})
+
   sh.config.silent = true
   const base64ConStr = Buffer.from(connectionString).toString('base64')
   const tmpFile = tmp.fileSync()
@@ -23,54 +29,27 @@ test.before(t => {
     sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${defaultNamespace}`).code,
     'creating a deployment should work.'
   )
+  t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 60, 1000), 'replica count should be 0 after 1 minute')
 })
 
-test.serial('Deployment should have 0 replicas on start', t => {
-  const replicaCount = sh.exec(
-    `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"`
-  ).stdout
-  t.is(replicaCount, '0', 'replica count should start out as 0')
-})
-
-test.serial.cb(
-  'Deployment should scale to 4 with 10,000 messages on the queue then back to 0',
-  t => {
-    // add 10,000 messages
+test.serial(
+  'Deployment should scale to 1 with 1,000 messages on the queue then back to 0',
+  async t => {
     const queueSvc = azure.createQueueService(connectionString)
     queueSvc.messageEncoder = new azure.QueueMessageEncoder.TextBase64QueueMessageEncoder()
-    queueSvc.createQueueIfNotExists('queue-name', err => {
-      t.falsy(err, 'unable to create queue')
-      async.mapLimit(
-        Array(10000).keys(),
-        200,
-        (n, cb) => queueSvc.createMessage('queue-name', `test ${n}`, cb),
-        () => {
-          let replicaCount = '0'
-          for (let i = 0; i < 30 && replicaCount !== '4'; i++) {
-            replicaCount = sh.exec(
-              `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"`
-            ).stdout
-            if (replicaCount !== '4') {
-              sh.exec('sleep 1s')
-            }
-          }
+    await async.mapLimit(
+      Array(1000).keys(),
+      20,
+      (n, cb) => queueSvc.createMessage(queueName, `test ${n}`, cb)
+    )
 
-          t.is('4', replicaCount, 'Replica count should be 4 after 30 seconds')
+    // Scaling out when messages available
+    t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 60, 1000), 'replica count should be 1 after 1 minute')
 
-          for (let i = 0; i < 50 && replicaCount !== '0'; i++) {
-            replicaCount = sh.exec(
-              `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"`
-            ).stdout
-            if (replicaCount !== '0') {
-              sh.exec('sleep 5s')
-            }
-          }
+    queueSvc.clearMessages(queueName, _ => {})
 
-          t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes')
-          t.end()
-        }
-      )
-    })
+    // Scaling in when no available messages
+    t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 300, 1000), 'replica count should be 0 after 5 minutes')
   }
 )
 
@@ -88,7 +67,7 @@ test.after.always.cb('clean up azure-queue deployment', t => {
 
   // delete test queue
   const queueSvc = azure.createQueueService(connectionString)
-  queueSvc.deleteQueueIfExists('queue-name', err => {
+  queueSvc.deleteQueueIfExists(queueName, err => {
     t.falsy(err, 'should delete test queue successfully')
     t.end()
   })
@@ -142,10 +121,11 @@ spec:
   scaleTargetRef:
     name: test-deployment
   pollingInterval: 5
-  maxReplicaCount: 4
+  minReplicaCount: 0
+  maxReplicaCount: 1
   cooldownPeriod: 10
   triggers:
   - type: azure-queue
    metadata:
-      queueName: queue-name
+      queueName: ${queueName}
connectionFromEnv: AzureWebJobsStorage` diff --git a/tests/scalers/cassandra.test.ts b/tests/scalers/cassandra.test.ts index e4d38f5696f..d04bcb7caa2 100644 --- a/tests/scalers/cassandra.test.ts +++ b/tests/scalers/cassandra.test.ts @@ -22,10 +22,10 @@ test.before(t => { // wait for cassandra to load console.log("wait for cassandra to load") let cassandraReadyReplicaCount = '0' - for (let i = 0; i < 50; i++) { + for (let i = 0; i < 30; i++) { cassandraReadyReplicaCount = sh.exec(`kubectl get deploy/cassandra -n ${cassandraNamespace} -o jsonpath='{.status.readyReplicas}'`).stdout if (cassandraReadyReplicaCount != '1') { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } t.is('1', cassandraReadyReplicaCount, 'Cassandra is not in a ready state') @@ -36,7 +36,7 @@ test.before(t => { for (let i = 0; i < 30; i++) { cassandraReady = sh.exec(`kubectl exec -n ${cassandraNamespace} ${cassandraPod} -- nodetool status | grep -w -o UN`) if (cassandraReady != "UN\n") { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } else { break @@ -53,10 +53,10 @@ test.before(t => { // wait for cassandra-client to load console.log("wait for cassandra-client to load") let cassandraClientReadyReplicaCount = '0' - for (let i = 0; i < 50; i++) { + for (let i = 0; i < 30; i++) { cassandraClientReadyReplicaCount = sh.exec(`kubectl get deploy/cassandra-client -n ${cassandraNamespace} -o jsonpath='{.status.readyReplicas}'`).stdout if (cassandraClientReadyReplicaCount != '1') { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } t.is('1', cassandraClientReadyReplicaCount, 'Cassandra client is not in a ready state') @@ -67,7 +67,7 @@ test.before(t => { for (let i = 0; i < 30; i++) { cassandraClientReady = sh.exec(`kubectl exec -n ${cassandraNamespace} ${cassandraClientPod} -- nodetool status | grep -w -o UN`) if (cassandraClientReady != "UN\n") { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } else { break @@ -94,7 +94,7 @@ test.before(t => { for (let i = 0; i < 30; i++) { nginxReadyReplicaCount = sh.exec(`kubectl get deploy/${nginxDeploymentName} -n ${cassandraNamespace} -o jsonpath='{.status.readyReplicas}'`).stdout if (nginxReadyReplicaCount != '') { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } t.is('', nginxReadyReplicaCount, 'creating an Nginx deployment should work') @@ -108,7 +108,7 @@ test.serial('Should start off deployment with 0 replicas', t => { }) -test.serial(`Replicas should scale to 4 (the max) then back to 0`, t => { +test.serial(`Replicas should scale to 2 (the max) then back to 0`, t => { // insert data to cassandra console.log("insert data to cassandra") const insertData = `BEGIN BATCH @@ -130,17 +130,17 @@ test.serial(`Replicas should scale to 4 (the max) then back to 0`, t => { ) let replicaCount = '0' - const maxReplicaCount = '4' + const maxReplicaCount = '2' for (let i = 0; i < 30 && replicaCount !== maxReplicaCount; i++) { replicaCount = sh.exec( `kubectl get deploy/${nginxDeploymentName} --namespace ${cassandraNamespace} -o jsonpath="{.spec.replicas}"`).stdout if (replicaCount !== maxReplicaCount) { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } - t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 60 seconds`) + t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 300 seconds`) sh.exec('sleep 30s') // delete all data from cassandra @@ -157,11 +157,11 @@ test.serial(`Replicas should scale to 4 (the max) then back to 0`, t => { replicaCount = sh.exec( `kubectl get deploy/${nginxDeploymentName} --namespace ${cassandraNamespace} -o 
jsonpath="{.spec.replicas}"`).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } - t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes') + t.is('0', replicaCount, 'Replica count should be 0 after 5 minutes') }) @@ -279,7 +279,7 @@ metadata: name: cassandra-scaledobject spec: minReplicaCount: 0 - maxReplicaCount: 4 + maxReplicaCount: 2 pollingInterval: 1 # Optional. Default: 30 seconds cooldownPeriod: 1 # Optional. Default: 300 seconds scaleTargetRef: diff --git a/tests/scalers/elasticsearch.test.ts b/tests/scalers/elasticsearch.test.ts index 3b04013d3c5..e7901338346 100644 --- a/tests/scalers/elasticsearch.test.ts +++ b/tests/scalers/elasticsearch.test.ts @@ -247,11 +247,6 @@ spec: name: transport protocol: TCP resources: - requests: - cpu: 100m - memory: 1Gi - limits: - memory: 1Gi readinessProbe: exec: command: diff --git a/tests/scalers/graphite.test.ts b/tests/scalers/graphite.test.ts index 1ee7b13a9d6..65061701a47 100644 --- a/tests/scalers/graphite.test.ts +++ b/tests/scalers/graphite.test.ts @@ -62,7 +62,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment php-apache-graphite --namespace ${graphiteNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } @@ -73,7 +73,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment php-apache-graphite --namespace ${graphiteNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } diff --git a/tests/scalers/mongodb.test.ts b/tests/scalers/mongodb.test.ts index b314b525500..3501aacce3a 100644 --- a/tests/scalers/mongodb.test.ts +++ b/tests/scalers/mongodb.test.ts @@ -1,4 +1,3 @@ -import * as async from 'async' import * as fs from 'fs' import * as sh from 'shelljs' import * as tmp from 'tmp' diff --git a/tests/scalers/mysql.test.ts b/tests/scalers/mysql.test.ts index 14f36d6df16..2c41448adf2 100644 --- a/tests/scalers/mysql.test.ts +++ b/tests/scalers/mysql.test.ts @@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 (the max) then back to 0`, t => { const maxReplicaCount = '5' - for (let i = 0; i < 30 && replicaCount !== maxReplicaCount; i++) { + for (let i = 0; i < 60 && replicaCount !== maxReplicaCount; i++) { replicaCount = sh.exec( `kubectl get deployment.apps/${deploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -85,7 +85,7 @@ test.serial(`Deployment should scale to 5 (the max) then back to 0`, t => { } } - t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 60 seconds`) + t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 120 seconds`) for (let i = 0; i < 36 && replicaCount !== '0'; i++) { replicaCount = sh.exec( diff --git a/tests/scalers/new-relic.test.ts b/tests/scalers/new-relic.test.ts index 6a2ecb7d245..45593ee1e96 100644 --- a/tests/scalers/new-relic.test.ts +++ b/tests/scalers/new-relic.test.ts @@ -102,14 +102,14 @@ test.serial('Deployment should have 1 replicas on start', t => { t.is(replicaCount, '1', 'replica count should start out as 0') }) -test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding in the rate then back to 0`, t => { +test.serial(`Deployment should scale to 3 (the max) with HTTP Requests exceeding in the rate then back to 0`, t => { // generate a large number of HTTP requests 
   // so prometheus has some time to scrape it
-  const tmpFile = tmp.fileSync()
-  fs.writeFileSync(tmpFile.name, generateRequestsYaml.replace('{{NAMESPACE}}', testNamespace))
+  const loadGeneratorFile = tmp.fileSync()
+  fs.writeFileSync(loadGeneratorFile.name, generateRequestsYaml.replace('{{NAMESPACE}}', testNamespace))
   t.is(
     0,
-    sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code,
+    sh.exec(`kubectl apply -f ${loadGeneratorFile.name} --namespace ${testNamespace}`).code,
     'creating job should work.'
   )
 
@@ -123,7 +123,7 @@
 
   // keda based deployment should start scaling up with http requests issued
   let replicaCount = '0'
-  for (let i = 0; i < 60 && replicaCount !== '5'; i++) {
-    t.log(`Waited ${5 * i} seconds for new-relic-based deployments to scale up`)
+  for (let i = 0; i < 60 && replicaCount !== '3'; i++) {
+    t.log(`Waited ${10 * i} seconds for new-relic-based deployments to scale up`)
     const jobLogs = sh.exec(`kubectl logs -l job-name=generate-requests -n ${testNamespace}`).stdout
     t.log(`Logs from the generate requests: ${jobLogs}`)
@@ -131,23 +131,29 @@
     replicaCount = sh.exec(
       `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
     ).stdout
-    if (replicaCount !== '5') {
-      sh.exec('sleep 5s')
+    if (replicaCount !== '3') {
+      sh.exec('sleep 10s')
     }
   }
 
-  t.is('5', replicaCount, 'Replica count should be maxed at 5')
+  t.is('3', replicaCount, 'Replica count should be maxed at 3')
+
+  t.is(
+    0,
+    sh.exec(`kubectl delete -f ${loadGeneratorFile.name} --namespace ${testNamespace}`).code,
+    'deleting job should work.'
+  )
 
-  for (let i = 0; i < 50 && replicaCount !== '0'; i++) {
+  for (let i = 0; i < 60 && replicaCount !== '0'; i++) {
     replicaCount = sh.exec(
       `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
     ).stdout
     if (replicaCount !== '0') {
-      sh.exec('sleep 5s')
+      sh.exec('sleep 10s')
     }
   }
 
-  t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes')
+  t.is('0', replicaCount, 'Replica count should be 0 after 10 minutes')
 
   sh.exec('sleep 10s')
 })
@@ -171,9 +177,9 @@ spec:
       - image: jordi/ab
         name: test
         command: ["/bin/sh"]
-        args: ["-c", "for i in $(seq 1 60);do echo $i;ab -c 5 -n 1000 -v 2 http://test-app/;sleep 1;done"]
+        args: ["-c", "for i in $(seq 1 60);do echo $i;ab -c 5 -n 10000 -v 2 http://test-app/;sleep 1;done"]
       restartPolicy: Never
-  activeDeadlineSeconds: 120
+  activeDeadlineSeconds: 600
   backoffLimit: 2`
 
 const deployYaml = `apiVersion: apps/v1
@@ -263,7 +269,7 @@ spec:
   scaleTargetRef:
     name: keda-test-app
   minReplicaCount: 0
-  maxReplicaCount: 5
+  maxReplicaCount: 3
   pollingInterval: 5
   cooldownPeriod: 10
   triggers:
@@ -271,7 +277,7 @@ spec:
     metadata:
       account: '{{NEWRELIC_ACCOUNT_ID}}'
       region: '{{NEWRELIC_REGION}}'
-      threshold: '100'
+      threshold: '10'
       nrql: SELECT average(\`http_requests_total\`) FROM Metric where serviceName='test-app' and namespaceName='new-relic-test' since 60 seconds ago
   authenticationRef:
     name: newrelic-trigger
diff --git a/tests/scalers/openstack-swift.test.ts b/tests/scalers/openstack-swift.test.ts
index fb267bb8e16..ef871e4bacf 100644
--- a/tests/scalers/openstack-swift.test.ts
+++ b/tests/scalers/openstack-swift.test.ts
@@ -184,13 +184,13 @@ test.serial('Deployment should be scaled to 5 after deleting 5 objects in contai
   await swiftClient.deleteObject(swiftContainerName, '2/hello-world.txt')
   await
swiftClient.deleteObject(swiftContainerName, '3/') - for (let i = 0; i < 110 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment.apps/${deploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } diff --git a/tests/scalers/predictkube.test.ts b/tests/scalers/predictkube.test.ts index 5db9437fb63..151718692e6 100644 --- a/tests/scalers/predictkube.test.ts +++ b/tests/scalers/predictkube.test.ts @@ -68,7 +68,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding // keda based deployment should start scaling up with http requests issued let replicaCount = '0' for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - t.log(`Waited ${5 * i} seconds for predictkube-based deployments to scale up`) + t.log(`Waited ${10 * i} seconds for predictkube-based deployments to scale up`) const jobLogs = sh.exec(`kubectl logs -l job-name=generate-requests -n ${testNamespace}`).stdout t.log(`Logs from the generate requests: ${jobLogs}`) @@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } @@ -88,7 +88,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } diff --git a/tests/scalers/prometheus.test.ts b/tests/scalers/prometheus.test.ts index 1eaa9df9cfe..04e79d81b52 100644 --- a/tests/scalers/prometheus.test.ts +++ b/tests/scalers/prometheus.test.ts @@ -73,7 +73,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } @@ -84,7 +84,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } diff --git a/tests/scalers/redis-cluster-lists.test.ts b/tests/scalers/redis-cluster-lists.test.ts index a78ca1d5bdb..c6ad09afdd6 100644 --- a/tests/scalers/redis-cluster-lists.test.ts +++ b/tests/scalers/redis-cluster-lists.test.ts @@ -30,14 +30,14 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let clusterStatus = sh.exec(`helm install --timeout 600s ${redisClusterName} --namespace ${redisNamespace} --set "global.redis.password=${redisPassword}" bitnami/redis-cluster`).code + let clusterStatus = sh.exec(`helm install --timeout 900s ${redisClusterName} --namespace ${redisNamespace} --set "global.redis.password=${redisPassword}" bitnami/redis-cluster`).code t.is(0, clusterStatus, 'creating a Redis cluster should work.' ) // Wait for Redis cluster to be ready. 
- t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300)) + t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600)) // Get Redis cluster address. redisHost = sh.exec(`kubectl get svc ${redisService} -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) @@ -133,19 +133,19 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -155,7 +155,7 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) test.serial('Deployment for redis address env var should have 0 replica on start', t => { @@ -173,19 +173,19 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -195,7 +195,7 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) @@ -213,19 +213,19 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - 
t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -235,7 +235,7 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) diff --git a/tests/scalers/redis-cluster-streams.test.ts b/tests/scalers/redis-cluster-streams.test.ts index 3faed339a7f..86e0ca85800 100644 --- a/tests/scalers/redis-cluster-streams.test.ts +++ b/tests/scalers/redis-cluster-streams.test.ts @@ -18,14 +18,14 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let clusterStatus = sh.exec(`helm install --timeout 600s ${redisClusterName} --namespace ${redisNamespace} --set "global.redis.password=${redisPassword}" bitnami/redis-cluster`).code + let clusterStatus = sh.exec(`helm install --timeout 900s ${redisClusterName} --namespace ${redisNamespace} --set "global.redis.password=${redisPassword}" bitnami/redis-cluster`).code t.is(0, clusterStatus, 'creating a Redis cluster should work.' ) // Wait for Redis cluster to be ready. - let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300) + let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600) t.is(0, exitCode, 'expected rollout status for redis to finish successfully') // Get Redis cluster address. @@ -66,7 +66,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back ) // Wait for producer job to finish. - for (let i = 0; i < 40; i++) { + for (let i = 0; i < 60; i++) { const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout if (succeeded == '1') { break @@ -74,7 +74,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back sh.exec('sleep 1s') } // With messages published, the consumer deployment should start receiving the messages. 
- t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 30, 3000), 'Replica count should be 5 within 60 seconds') + t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 5 within 10 minutes') t.true(await waitForDeploymentReplicaCount(1, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 1 within 10 minutes') }) diff --git a/tests/scalers/redis-lists.test.ts b/tests/scalers/redis-lists.test.ts index cf37bef07f7..b526bee09aa 100644 --- a/tests/scalers/redis-lists.test.ts +++ b/tests/scalers/redis-lists.test.ts @@ -33,7 +33,7 @@ test.before(t => { t.is(0, sh.exec(`kubectl apply --namespace ${redisNamespace} -f ${redisDeployTmpFile.name}`).code, 'creating a Redis deployment should work.') // wait for redis to be ready - t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 300), 'Redis is not in a ready state') + t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 600), 'Redis is not in a ready state') sh.exec(`kubectl create namespace ${testNamespace}`) @@ -124,19 +124,19 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) let replicaCount = '0' - for (let i = 0; i < 20 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -146,7 +146,7 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) test.serial('Deployment for redis address env var should have 0 replica on start', t => { @@ -164,19 +164,19 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) let replicaCount = '0' - for (let i = 0; i < 20 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -186,7 +186,7 @@ test.serial(`Deployment using 
redis address env var should max and scale to 5 wi } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) @@ -204,19 +204,19 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) let replicaCount = '0' - for (let i = 0; i < 20 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -226,7 +226,7 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) diff --git a/tests/scalers/redis-sentinel-lists.test.ts b/tests/scalers/redis-sentinel-lists.test.ts index 1d0171b1ba0..0208df8bf73 100644 --- a/tests/scalers/redis-sentinel-lists.test.ts +++ b/tests/scalers/redis-sentinel-lists.test.ts @@ -31,14 +31,14 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let sentinelStatus = sh.exec(`helm install --timeout 600s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code + let sentinelStatus = sh.exec(`helm install --timeout 900s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code t.is(0, sentinelStatus, 'creating a Redis sentinel setup should work.' ) // Wait for Redis sentinel to be ready. - t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300), 'Redis is not in a ready state') + t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600), 'Redis is not in a ready state') // Get Redis sentinel address. 
redisHost = sh.exec(`kubectl get svc ${redisService} -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) @@ -142,19 +142,19 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -164,7 +164,7 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) test.serial('Deployment for redis address env var should have 0 replica on start', t => { @@ -182,19 +182,19 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -204,7 +204,7 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) @@ -222,19 +222,19 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + 
for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -244,7 +244,7 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) diff --git a/tests/scalers/redis-sentinel-streams.test.ts b/tests/scalers/redis-sentinel-streams.test.ts index dd0e257148d..dd79c4a4d23 100644 --- a/tests/scalers/redis-sentinel-streams.test.ts +++ b/tests/scalers/redis-sentinel-streams.test.ts @@ -19,14 +19,14 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let sentinelStatus = sh.exec(`helm install --timeout 600s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code + let sentinelStatus = sh.exec(`helm install --timeout 900s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code t.is(0, sentinelStatus, 'creating a Redis Sentinel setup should work.' ) // Wait for Redis Sentinel to be ready. - let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300) + let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600) t.is(0, exitCode, 'expected rollout status for redis to finish successfully') // Get Redis Sentinel address. @@ -68,7 +68,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back ) // Wait for producer job to finish. - for (let i = 0; i < 40; i++) { + for (let i = 0; i < 60; i++) { const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout if (succeeded == '1') { break @@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back sh.exec('sleep 1s') } // With messages published, the consumer deployment should start receiving the messages. 
- t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 30, 3000), 'Replica count should be 5 within 60 seconds') + t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 30, 10000), 'Replica count should be 5 within 5 minutes') t.true(await waitForDeploymentReplicaCount(1, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 1 within 10 minutes') }) diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts index 72fa5032ce6..6b17ecdc41a 100644 --- a/tests/scalers/redis-streams.test.ts +++ b/tests/scalers/redis-streams.test.ts @@ -21,7 +21,7 @@ test.before(t => { t.is(0, sh.exec(`kubectl apply --namespace ${redisNamespace} -f ${tmpFile1.name}`).code, 'creating a Redis deployment should work.') // wait for redis to be ready - t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 300), 'Redis is not in a ready state') + t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 600), 'Redis is not in a ready state') sh.exec(`kubectl create namespace ${testNamespace}`) @@ -57,7 +57,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back ) // wait for the producer job to complete - for (let i = 0; i < 20; i++) { + for (let i = 0; i < 60; i++) { const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout if (succeeded == '1') { break @@ -66,17 +66,17 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back } // with messages published, the consumer deployment should start receiving the messages let replicaCount = '0' - for (let i = 0; i < 20 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') for (let i = 0; i < 60 && replicaCount !== '1'; i++) { replicaCount = sh.exec( diff --git a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts index 03cfbcf1fc4..71611af8c3c 100644 --- a/tests/scalers/selenium-grid.test.ts +++ b/tests/scalers/selenium-grid.test.ts @@ -19,35 +19,18 @@ test.before(t => { let seleniumHubReplicaCount = '0'; - for (let i = 0; i < 30; i++) { + for (let i = 0; i < 60; i++) { seleniumHubReplicaCount = sh.exec(`kubectl get deploy/selenium-hub -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout if (seleniumHubReplicaCount == '1') { break; } console.log('Waiting for selenium hub to be ready'); - sh.exec('sleep 2s') + sh.exec('sleep 5s') } t.is('1', seleniumHubReplicaCount, 'Selenium Hub is not in a ready state') }); -test.serial('should have one node for chrome and firefox each at start', t => { - let seleniumChromeNodeReplicaCount = '0'; - let seleniumFireFoxReplicaCount = '0'; - for (let i = 0; i < 30; i++) { - seleniumChromeNodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout - seleniumFireFoxReplicaCount = sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout - if 
(seleniumChromeNodeReplicaCount == '1' && seleniumFireFoxReplicaCount == '1') { - break; - } - console.log('Waiting for chrome and firefox node to be ready'); - sh.exec('sleep 2s') - } - - t.is('1', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale up to 1 pods') - t.is('1', seleniumFireFoxReplicaCount, 'Selenium Firefox Node did not scale up to 1 pods') -}); - -test.serial('should scale down browser nodes to 0', t => { +test.serial('should have 0 nodes at start', t => { const scaledObjectDeployTmpFile = tmp.fileSync(); fs.writeFileSync(scaledObjectDeployTmpFile.name, scaledObjectYaml.replace(/{{NAMESPACE}}/g, seleniumGridNamespace).replace(/{{SELENIUM_GRID_GRAPHQL_URL}}/g, seleniumGridGraphQLUrl)); @@ -62,7 +45,7 @@ test.serial('should scale down browser nodes to 0', t => { break; } console.log('Waiting for chrome and firefox to scale down to 0 pods') - sh.exec('sleep 5s') + sh.exec('sleep 10s') } t.is('0', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale down to 0 pods') @@ -83,18 +66,9 @@ test.serial('should create one chrome and firefox node', t => { t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.'); - // wait for selenium grid tests to start running - for (let i = 0; i < 20; i++) { - const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout - if (running == '1') { - break; - } - sh.exec('sleep 1s') - } - - let seleniumChromeNodeReplicaCount = '0'; + let seleniumChromeNodeReplicaCount = '0'; let seleniumFireFoxReplicaCount = '0'; - for (let i = 0; i < 30; i++) { + for (let i = 0; i < 120; i++) { seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount; seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount; if (seleniumChromeNodeReplicaCount == '1' && seleniumFireFoxReplicaCount == '1') { @@ -108,30 +82,30 @@ test.serial('should create one chrome and firefox node', t => { t.is('1', seleniumFireFoxReplicaCount, 'Selenium Firefox Node did not scale up to 1 pod') // wait for selenium grid tests to complete - let succeeded = '0'; - for (let i = 0; i < 60; i++) { - succeeded = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { + for (let i = 0; i < 120; i++) { + seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '0' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount; + seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '0' ? 
sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount;
+        if (seleniumChromeNodeReplicaCount == '0' && seleniumFireFoxReplicaCount == '0') {
             break;
         }
-        sh.exec('sleep 1s')
+        console.log('Waiting for chrome and firefox nodes to scale down to 0 pods');
+        sh.exec('sleep 2s')
     }
 
     sh.exec(`kubectl delete job/${seleniumGridTestName} --namespace ${seleniumGridNamespace}`)
 });
 
 test.serial('should scale down chrome and firefox nodes to 0', t => {
-
     let seleniumChromeNodeReplicaCount = '1';
     let seleniumFireFoxReplicaCount = '1';
-    for (let i = 0; i < 65; i++) {
+    for (let i = 0; i < 120; i++) {
         seleniumChromeNodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout;
         seleniumFireFoxReplicaCount = sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout;
         if (seleniumChromeNodeReplicaCount == '0' && seleniumFireFoxReplicaCount == '0') {
             break;
         }
         console.log('Waiting for chrome and firefox to scale down to 0 pod');
-        sh.exec('sleep 5s')
+        sh.exec('sleep 2s')
     }
 
     t.is('0', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale down to 0 pod')
@@ -145,13 +119,13 @@ test.serial('should create two chrome and one firefox nodes', t => {
     t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${chrome91DeployTmpFile.name}`).code, 'creating Chrome 91 node should work.')
 
     let seleniumChrome91NodeReplicaCount = '1';
-    for (let i = 0; i < 60; i++) {
+    for (let i = 0; i < 120; i++) {
         seleniumChrome91NodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node-91 -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout
         if (seleniumChrome91NodeReplicaCount == '0') {
             break;
         }
         console.log('Waiting for chrome 91 to scale down to 0 pods')
-        sh.exec('sleep 5s')
+        sh.exec('sleep 2s')
     }
 
     const seleniumGridTestDeployTmpFile = tmp.fileSync();
@@ -167,19 +141,10 @@
 
     t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.');
 
-    // wait for selenium grid tests to start running
-    for (let i = 0; i < 20; i++) {
-        const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout
-        if (running == '1') {
-            break;
-        }
-        sh.exec('sleep 1s')
-    }
-
     let seleniumChromeNodeReplicaCount = '0';
     let seleniumFireFoxReplicaCount = '0';
     seleniumChrome91NodeReplicaCount = '0';
-    for (let i = 0; i < 30; i++) {
+    for (let i = 0; i < 120; i++) {
         seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount;
         seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount;
         seleniumChrome91NodeReplicaCount = seleniumChrome91NodeReplicaCount != '1' ?
sh.exec(`kubectl get deploy/selenium-chrome-node-91 -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChrome91NodeReplicaCount; @@ -329,7 +294,7 @@ metadata: app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 helm.sh/chart: selenium-grid-0.2.0 spec: - replicas: 1 + replicas: 0 selector: matchLabels: app: selenium-chrome-node @@ -353,12 +318,6 @@ spec: - name: dshm mountPath: /dev/shm resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: "1" - memory: 1Gi volumes: - name: dshm emptyDir: @@ -380,7 +339,7 @@ metadata: app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 helm.sh/chart: selenium-grid-0.2.0 spec: - replicas: 1 + replicas: 0 selector: matchLabels: app: selenium-firefox-node @@ -404,12 +363,6 @@ spec: - name: dshm mountPath: /dev/shm resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: "1" - memory: 1Gi volumes: - name: dshm emptyDir: @@ -484,7 +437,7 @@ metadata: app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 helm.sh/chart: selenium-grid-0.2.0 spec: - replicas: 1 + replicas: 0 selector: matchLabels: app: selenium-chrome-node-91 @@ -508,12 +461,6 @@ spec: - name: dshm mountPath: /dev/shm resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: "1" - memory: 1Gi volumes: - name: dshm emptyDir: @@ -552,7 +499,9 @@ metadata: labels: deploymentName: selenium-chrome-node-91 spec: - maxReplicaCount: 8 + maxReplicaCount: 1 + pollingInterval: 5 + cooldownPeriod: 5 scaleTargetRef: name: selenium-chrome-node-91 triggers: @@ -573,7 +522,9 @@ metadata: labels: deploymentName: selenium-chrome-node spec: - maxReplicaCount: 8 + maxReplicaCount: 1 + pollingInterval: 5 + cooldownPeriod: 5 scaleTargetRef: name: selenium-chrome-node triggers: @@ -591,7 +542,9 @@ metadata: labels: deploymentName: selenium-firefox-node spec: - maxReplicaCount: 8 + maxReplicaCount: 1 + pollingInterval: 5 + cooldownPeriod: 5 scaleTargetRef: name: selenium-firefox-node triggers: @@ -614,7 +567,7 @@ spec: spec: containers: - name: {{CONTAINER_NAME}} - image: prashanth0007/selenium-random-tests:v1.0.2 + image: ghcr.io/kedacore/tests-selenium-grid imagePullPolicy: Always env: - name: HOST_NAME
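
The hunks above repeatedly replace hand-rolled `kubectl get ... jsonpath` polling loops with the shared `waitForDeploymentReplicaCount` helper imported from `./helpers`, and raise the timeouts passed to `waitForRollout`. The helper bodies are not part of this patch; the sketch below is a minimal illustration of the contract visible at the call sites, assuming shelljs and a kubectl binary on the PATH and mirroring the `{.spec.replicas}` jsonpath used by the inline loops being replaced. Everything beyond the two signatures is an assumption, not the actual KEDA implementation.

import * as sh from 'shelljs'

// Sketch (assumed implementation): poll the deployment's replica count every
// `interval` ms, up to `iterations` times, resolving true as soon as the
// count matches `target`.
export async function waitForDeploymentReplicaCount(
    target: number, name: string, namespace: string,
    iterations: number, interval: number): Promise<boolean> {
    for (let i = 0; i < iterations; i++) {
        const replicaCount = sh.exec(
            `kubectl get deployment.apps/${name} --namespace ${namespace} -o jsonpath="{.spec.replicas}"`
        ).stdout
        if (replicaCount && parseInt(replicaCount, 10) === target) {
            return true
        }
        await new Promise(resolve => setTimeout(resolve, interval))
    }
    return false
}

// Sketch (assumed implementation): block until the rollout finishes or the
// timeout elapses; return kubectl's exit code (0 on success), matching the
// t.is(0, waitForRollout(...)) call sites above.
export function waitForRollout(kind: 'deployment' | 'statefulset',
    name: string, namespace: string, timeoutSeconds: number): number {
    return sh.exec(
        `kubectl rollout status ${kind}/${name} --namespace ${namespace} --timeout=${timeoutSeconds}s`
    ).code
}

Under these assumptions, a call such as `waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 10000)` polls every 10 seconds for up to 20 minutes, which is why the updated assertion messages quote generous upper bounds rather than exact durations.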