diff --git a/test/e2e/upgrade/alert/alert.go b/test/e2e/upgrade/alert/alert.go
index 081d1d395467..df8a66510b83 100644
--- a/test/e2e/upgrade/alert/alert.go
+++ b/test/e2e/upgrade/alert/alert.go
@@ -170,7 +170,17 @@ count_over_time(ALERTS{alertstate="firing",severity!="info",alertname!~"Watchdog
 	}
 
 	// Invariant: There should be no pending alerts 1m after the upgrade completes
-	pendingAlertQuery := `ALERTS{alertname!~"Watchdog|AlertmanagerReceiversNotConfigured",alertstate="pending",severity!="info"}`
+	pendingAlertQuery := fmt.Sprintf(`
+sort_desc(
+  time() * ALERTS + 1
+  -
+  last_over_time((
+    time() * ALERTS{alertname!~"Watchdog|AlertmanagerReceiversNotConfigured",alertstate="pending",severity!="info"}
+    unless
+    ALERTS offset 1s
+  )[%[1]s:1s])
+)
+`, testDuration)
 	result, err = helper.RunQuery(pendingAlertQuery, ns, execPod.Name, t.url, t.bearerToken)
 	o.Expect(err).NotTo(o.HaveOccurred(), "unable to retrieve pending alerts after upgrade")
 	for _, series := range result.Data.Result {
diff --git a/test/extended/prometheus/prometheus.go b/test/extended/prometheus/prometheus.go
index 8f5a79540073..0a9c650943fc 100644
--- a/test/extended/prometheus/prometheus.go
+++ b/test/extended/prometheus/prometheus.go
@@ -151,7 +151,17 @@ count_over_time(ALERTS{alertstate="firing",severity!="info",alertname!~"Watchdog
 	}
 
 	// Invariant: There should be no pending alerts after the test run
-	pendingAlertQuery := `ALERTS{alertname!~"Watchdog|AlertmanagerReceiversNotConfigured",alertstate="pending",severity!="info"}`
+	pendingAlertQuery := fmt.Sprintf(`
+sort_desc(
+  time() * ALERTS + 1
+  -
+  last_over_time((
+    time() * ALERTS{alertname!~"Watchdog|AlertmanagerReceiversNotConfigured",alertstate="pending",severity!="info"}
+    unless
+    ALERTS offset 1s
+  )[%[1]s:1s])
+)
+`, testDuration)
 	result, err = helper.RunQuery(pendingAlertQuery, ns, execPod.Name, url, bearerToken)
 	o.Expect(err).NotTo(o.HaveOccurred(), "unable to retrieve pending alerts after upgrade")
 	for _, series := range result.Data.Result {
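
For readers less familiar with PromQL, the sketch below shows how the new pendingAlertQuery can be assembled and what its result values mean. It is not part of the patch: the pendingAlertAgeQuery helper, the one-hour window, and the use of time.Duration formatting are illustrative assumptions; in the tests themselves the window comes from testDuration and the query is executed through helper.RunQuery as shown in the diff.

```go
// Standalone sketch (not part of the patch). It only illustrates the query
// built by the tests; names and the example window are assumptions.
package main

import (
	"fmt"
	"time"
)

// pendingAlertAgeQuery returns a PromQL expression whose value, for each alert
// still pending at evaluation time, is roughly the number of seconds it has
// been pending within the last `window`:
//   - `time() * ALERTS{...pending...} unless ALERTS offset 1s` yields the
//     evaluation timestamp only at the instants where an alert series appears,
//   - `last_over_time(...[window:1s])` keeps the most recent such start time,
//   - subtracting it from `time() * ALERTS + 1` gives the age in seconds,
//   - `sort_desc` lists the longest-pending alerts first.
func pendingAlertAgeQuery(window time.Duration) string {
	return fmt.Sprintf(`
sort_desc(
  time() * ALERTS + 1
  -
  last_over_time((
    time() * ALERTS{alertname!~"Watchdog|AlertmanagerReceiversNotConfigured",alertstate="pending",severity!="info"}
    unless
    ALERTS offset 1s
  )[%[1]s:1s])
)
`, window.Truncate(time.Second))
}

func main() {
	// A one-hour window stands in for the duration of the e2e run.
	fmt.Println(pendingAlertAgeQuery(time.Hour))
}
```

Compared with the old instant query, which only reported which alerts were pending at a single moment, each returned sample value here is a per-alert duration, presumably so the tests can report how long an alert stayed in the pending state over the whole run.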