diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go
index e3acdac36612..bdeb73156fcd 100644
--- a/vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go
+++ b/vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go
@@ -18,6 +18,7 @@ package framework
 
 import (
 	"fmt"
+	"strings"
 	"time"
 
 	batch "k8s.io/api/batch/v1"
@@ -233,22 +234,28 @@ func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Dura
 	})
 }
 
-// CheckForAllJobPodsRunning uses c to check in the Job named jobName in ns is running. If the returned error is not
-// nil the returned bool is true if the Job is running.
-func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
+// EnsureAllJobPodsRunning uses c to check that all pods of the Job named
+// jobName in ns are running, returning an error if the expected
+// parallelism is not satisfied.
+func EnsureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
 	label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
 	options := metav1.ListOptions{LabelSelector: label.String()}
 	pods, err := c.CoreV1().Pods(ns).List(options)
 	if err != nil {
-		return false, err
+		return err
 	}
+	podsSummary := make([]string, 0, parallelism)
 	count := int32(0)
 	for _, p := range pods.Items {
 		if p.Status.Phase == v1.PodRunning {
 			count++
 		}
+		podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message))
+	}
+	if count != parallelism {
+		return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
 	}
-	return count == parallelism, nil
+	return nil
 }
 
 // WaitForAllJobPodsRunning wait for all pods for the Job named jobName in namespace ns
diff --git a/vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/job.go b/vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/job.go
index 89313ddad9d1..88d7fc514213 100644
--- a/vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/job.go
+++ b/vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/job.go
@@ -45,7 +45,7 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
 	Expect(err).NotTo(HaveOccurred())
 
 	By("Ensuring active pods == parallelism")
-	err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
+	err = framework.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
 	Expect(err).NotTo(HaveOccurred())
 }
 
@@ -53,14 +53,11 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
 func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
 	<-done
 	By("Ensuring active pods == parallelism")
-	running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
-	Expect(err).NotTo(HaveOccurred())
-
-	if !running {
+	err := framework.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
+	if err != nil {
 		framework.DumpAllNamespaceInfo(f.ClientSet, t.namespace)
 	}
-
-	Expect(running).To(BeTrue())
+	Expect(err).NotTo(HaveOccurred())
 }
 
 // Teardown cleans up any remaining resources.
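The substance of the change is the error-reporting pattern in EnsureAllJobPodsRunning: rather than returning (bool, error) and leaving the caller to assert on the bool, the helper now returns a single error whose message lists every pod with its phase and status message, so a failed assertion is self-describing. A minimal standalone sketch of that pattern, using a hypothetical podInfo stand-in for v1.Pod so the example runs without a cluster (illustration only, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"
)

// podInfo is a hypothetical stand-in for the few v1.Pod fields the helper
// reads; it exists only to keep this sketch self-contained.
type podInfo struct {
	name    string
	phase   string
	message string
}

// ensureAllRunning mirrors the shape of EnsureAllJobPodsRunning: count pods
// in the Running phase while collecting a one-line summary of every pod,
// and return an error embedding that summary when the count misses the
// expected parallelism.
func ensureAllRunning(pods []podInfo, parallelism int32) error {
	podsSummary := make([]string, 0, parallelism)
	count := int32(0)
	for _, p := range pods {
		if p.phase == "Running" {
			count++
		}
		podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.name, p.phase, p.message))
	}
	if count != parallelism {
		return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
	}
	return nil
}

func main() {
	pods := []podInfo{
		{name: "foo-abc12", phase: "Running"},
		{name: "foo-def34", phase: "Pending", message: "0/3 nodes are available"},
	}
	// Expected failure: only one of two pods is Running, so the returned
	// error carries the full per-pod summary for debugging.
	if err := ensureAllRunning(pods, 2); err != nil {
		fmt.Println(err)
	}
}
```

Folding the summary into the error is what lets the Test call site collapse to a single err check, dumping namespace debug info on failure before the final Expect(err).NotTo(HaveOccurred()).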