diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 9d5ec2844373..abeb612342af 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -55,6 +55,8 @@ func TestDownloadOnly(t *testing.T) { for _, v := range versions { t.Run(v, func(t *testing.T) { + defer PostMortemLogs(t, profile) + // Explicitly does not pass StartArgs() to test driver default // --force to avoid uid check args := append([]string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v), fmt.Sprintf("--container-runtime=%s", r)}, StartArgs()...) @@ -124,6 +126,8 @@ func TestDownloadOnly(t *testing.T) { // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete! t.Run("DeleteAll", func(t *testing.T) { + defer PostMortemLogs(t, profile) + if !CanCleanup() { t.Skip("skipping, as cleanup is disabled") } @@ -134,6 +138,8 @@ func TestDownloadOnly(t *testing.T) { }) // Delete should always succeed, even if previously partially or fully deleted. t.Run("DeleteAlwaysSucceeds", func(t *testing.T) { + defer PostMortemLogs(t, profile) + if !CanCleanup() { t.Skip("skipping, as cleanup is disabled") } diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 597617beef85..0a63c39d23ae 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -38,9 +38,9 @@ import ( func TestAddons(t *testing.T) { profile := UniqueProfileName("addons") ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) - defer CleanupWithLogs(t, profile, cancel) + defer Cleanup(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...) 
+ args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("%s failed: %v", rr.Command(), err) @@ -82,6 +82,8 @@ func TestAddons(t *testing.T) { } func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + if NoneDriver() { t.Skipf("skipping: ssh unsupported by none") } @@ -156,6 +158,8 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { } func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + client, err := kapi.Client(profile) if err != nil { t.Fatalf("failed to get Kubernetes client for %s : %v", profile, err) @@ -230,6 +234,8 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { } func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + client, err := kapi.Client(profile) if err != nil { t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err) @@ -272,6 +278,8 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin } func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + client, err := kapi.Client(profile) if err != nil { t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err) diff --git a/test/integration/fn_pvc.go b/test/integration/fn_pvc.go index 9cca92cc4780..18f573d3bdfd 100644 --- a/test/integration/fn_pvc.go +++ b/test/integration/fn_pvc.go @@ -34,6 +34,8 @@ import ( ) func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + ctx, cancel := context.WithTimeout(ctx, Minutes(10)) defer cancel() diff 
--git a/test/integration/functional_test.go b/test/integration/functional_test.go index cebb777067ab..e2e716288dfc 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -78,7 +78,7 @@ func TestFunctional(t *testing.T) { t.Logf("unable to remove %q: %v", p, err) } - CleanupWithLogs(t, profile, cancel) + Cleanup(t, profile, cancel) }() // Serial tests @@ -145,6 +145,8 @@ func TestFunctional(t *testing.T) { // validateNodeLabels checks if minikube cluster is created with correct kubernetes's node label func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'")) if err != nil { t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err) @@ -159,6 +161,8 @@ func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { // check functionality of minikube after evaling docker-env func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + mctx, cancel := context.WithTimeout(ctx, Seconds(13)) defer cancel() // we should be able to get minikube status with a bash which evaled docker-env @@ -188,6 +192,8 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { } func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + srv, err := startHTTPProxy(t) if err != nil { t.Fatalf("failed to set up the test proxy: %s", err) @@ -219,6 +225,8 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { // validateSoftStart validates that after minikube already started, a "minikube start" should not change the configs. 
func validateSoftStart(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + start := time.Now() // the test before this had been start with --apiserver-port=8441 beforeCfg, err := config.LoadProfile(profile) @@ -250,6 +258,8 @@ func validateSoftStart(ctx context.Context, t *testing.T, profile string) { // validateKubeContext asserts that kubectl is properly configured (race-condition prone!) func validateKubeContext(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context")) if err != nil { t.Errorf("failed to get current-context. args %q : %v", rr.Command(), err) @@ -261,6 +271,8 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) { // validateKubectlGetPods asserts that `kubectl get pod -A` returns non-zero content func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A")) if err != nil { t.Errorf("failed to get kubectl pods: args %q : %v", rr.Command(), err) @@ -275,6 +287,8 @@ func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { // validateMinikubeKubectl validates that the `minikube kubectl` command returns content func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + // Must set the profile so that it knows what version of Kubernetes to use kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) @@ -285,6 +299,8 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) // validateComponentHealth asserts that all Kubernetes components are healthy func validateComponentHealth(ctx context.Context, t *testing.T, 
profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json")) if err != nil { t.Fatalf("failed to get components. args %q: %v", rr.Command(), err) @@ -310,6 +326,8 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) } func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { t.Errorf("failed to run minikube status. args %q : %v", rr.Command(), err) @@ -352,6 +370,8 @@ func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { // validateDashboardCmd asserts that the dashboard command works func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { @@ -391,6 +411,8 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { // validateDNS asserts that all Kubernetes DNS is healthy func validateDNS(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err) @@ -424,7 +446,7 @@ func validateDryRun(ctx context.Context, t *testing.T, profile string) { defer cancel() // Too little memory! - startArgs := append([]string{"start", "-p", profile, "--dry-run", "--memory", "250MB", "--alsologtostderr", "-v=1"}, StartArgs()...) + startArgs := append([]string{"start", "-p", profile, "--dry-run", "--memory", "250MB", "--alsologtostderr"}, StartArgs()...) 
c := exec.CommandContext(mctx, Target(), startArgs...) rr, err := Run(t, c) @@ -445,6 +467,8 @@ func validateDryRun(ctx context.Context, t *testing.T, profile string) { // validateCacheCmd tests functionality of cache command (cache add, delete, list) func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + if NoneDriver() { t.Skipf("skipping: cache unsupported by none") } @@ -639,6 +663,8 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { // validateServiceCmd asserts basic "service" command functionality func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + defer func() { if t.Failed() { t.Logf("service test failed - dumping debug information") @@ -684,11 +710,6 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String()) } - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "hello-node")) - if err != nil { - t.Logf("%q failed: %v (may not be an error)", rr.Command(), err) - } - if NeedsPortForward() { t.Skipf("test is broken for port-forwarded drivers: https://github.com/kubernetes/minikube/issues/7383") } @@ -703,12 +724,14 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { } endpoint := strings.TrimSpace(rr.Stdout.String()) + t.Logf("found endpoint: %s", endpoint) + u, err := url.Parse(endpoint) if err != nil { t.Fatalf("failed to parse service url endpoint %q: %v", endpoint, err) } if u.Scheme != "https" { - t.Errorf("expected scheme to be 'https' but got %q", u.Scheme) + t.Errorf("expected scheme for %s to be 'https' but got %q", endpoint, u.Scheme) } // Test --format=IP @@ -720,6 +743,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* 
. args %q.", u.Hostname(), rr.Stdout.String(), rr.Command()) } + // Test a regular URL rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url")) if err != nil { t.Errorf("failed to get service url. args: %q: %v", rr.Command(), err) @@ -737,9 +761,6 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("expected scheme to be -%q- got scheme: *%q*", "http", u.Scheme) } - c := retryablehttp.NewClient() - c.Logger = &logAdapter{t: t} - t.Logf("Attempting to fetch %s ...", endpoint) fetch := func() error { @@ -769,14 +790,10 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { } } -type logAdapter struct{ t *testing.T } - -func (l *logAdapter) Printf(s string, args ...interface{}) { - l.t.Logf(s, args...) -} - // validateAddonsCmd asserts basic "addon" command functionality func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + // Table output rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list")) if err != nil { @@ -802,6 +819,8 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // validateSSHCmd asserts basic "ssh" command functionality func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + if NoneDriver() { t.Skipf("skipping: ssh unsupported by none") } @@ -817,6 +836,8 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { // validateMySQL validates a minimalist MySQL deployment func validateMySQL(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml"))) if err != nil { t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Command(), err) @@ -905,6 +926,8 @@ func setupFileSync(ctx 
context.Context, t *testing.T, profile string) { // validateFileSync to check existence of the test file func validateFileSync(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + if NoneDriver() { t.Skipf("skipping: ssh unsupported by none") } @@ -930,6 +953,8 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) { // validateCertSync to check existence of the test certificate func validateCertSync(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + if NoneDriver() { t.Skipf("skipping: ssh unsupported by none") } @@ -963,6 +988,8 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { // validateUpdateContextCmd asserts basic "update-context" command functionality func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2")) if err != nil { t.Errorf("failed to run minikube update-context: args %q: %v", rr.Command(), err) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index e2e94be44ddf..21eff2428e6f 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -186,26 +186,33 @@ func CleanupWithLogs(t *testing.T, profile string, cancel context.CancelFunc) { } t.Logf("*** %s FAILED at %s", t.Name(), time.Now()) - - if *postMortemLogs { - clusterLogs(t, profile) - } + PostMortemLogs(t, profile) Cleanup(t, profile, cancel) } -// clusterLogs shows logs for debugging a failed cluster -func clusterLogs(t *testing.T, profile string) { +// PostMortemLogs shows logs for debugging a failed cluster +func PostMortemLogs(t *testing.T, profile string) { + if !t.Failed() { + return + } + + if !*postMortemLogs { + t.Logf("post-mortem logs disabled, oh-well!") + return + } + + t.Logf("-----------------------post-mortem--------------------------------") if
DockerDriver() { - t.Logf("======> post-mortem[%s]: docker logs <======", t.Name()) - rr, err := Run(t, exec.Command("docker", "logs", "--details", profile)) + t.Logf("======> post-mortem[%s]: docker inspect <======", t.Name()) + rr, err := Run(t, exec.Command("docker", "inspect", profile)) if err != nil { - t.Logf("failed to get docker logs : %v", err) + t.Logf("failed to get docker inspect: %v", err) } else { t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output()) } } + st := Status(context.Background(), t, Target(), profile, "Host") if st != state.Running.String() { t.Logf("%q host is not running, skipping log retrieval (state=%q)", profile, st) @@ -214,49 +220,37 @@ func clusterLogs(t *testing.T, profile string) { t.Logf("<<< %s FAILED: start of post-mortem logs <<<", t.Name()) t.Logf("======> post-mortem[%s]: minikube logs <======", t.Name()) - rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "--problems")) + rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "-n", "25")) if err != nil { t.Logf("failed logs error: %v", err) return } t.Logf("%s logs: %s", t.Name(), rr.Output()) - t.Logf("======> post-mortem[%s]: disk usage <======", t.Name()) - rr, err = Run(t, exec.Command(Target(), "-p", profile, "ssh", "sudo df -h /var/lib/docker/overlay2 /var /;sudo du -hs /var/lib/docker/overlay2")) - if err != nil { - t.Logf("failed df error: %v", err) - } - t.Logf("%s df: %s", t.Name(), rr.Stdout) - st = Status(context.Background(), t, Target(), profile, "APIServer") if st != state.Running.String() { t.Logf("%q apiserver is not running, skipping kubectl commands (state=%q)", profile, st) return } - t.Logf("======> post-mortem[%s]: get pods <======", t.Name()) - rr, rerr := Run(t, exec.Command("kubectl", "--context", profile, "get", "po", "-A", "--show-labels")) + // Get non-running pods.
NOTE: This does not yet contain pods which are "running", but not "ready" + rr, rerr := Run(t, exec.Command("kubectl", "--context", profile, "get", "po", "-o=jsonpath={.items[*].metadata.name}", "-A", "--field-selector=status.phase!=Running")) if rerr != nil { t.Logf("%s: %v", rr.Command(), rerr) return } - t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output()) + notRunning := strings.Split(rr.Stdout.String(), " ") + t.Logf("non-running pods: %s", strings.Join(notRunning, " ")) - t.Logf("======> post-mortem[%s]: describe node <======", t.Name()) - rr, err = Run(t, exec.Command("kubectl", "--context", profile, "describe", "node")) - if err != nil { - t.Logf("%s: %v", rr.Command(), err) - } else { - t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output()) - } + t.Logf("======> post-mortem[%s]: describe non-running pods <======", t.Name()) - t.Logf("======> post-mortem[%s]: describe pods <======", t.Name()) - rr, err = Run(t, exec.Command("kubectl", "--context", profile, "describe", "po", "-A")) - if err != nil { - t.Logf("%s: %v", rr.Command(), err) - } else { - t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Stdout) + args := append([]string{"--context", profile, "describe", "pod"}, notRunning...) 
+ rr, rerr = Run(t, exec.Command("kubectl", args...)) + if rerr != nil { + t.Logf("%s: %v", rr.Command(), rerr) + return } + t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output()) t.Logf("<<< %s FAILED: end of post-mortem logs <<<", t.Name()) t.Logf("---------------------/post-mortem---------------------------------") diff --git a/test/integration/pause_test.go b/test/integration/pause_test.go index d9b3e337103c..73f3b99de66f 100644 --- a/test/integration/pause_test.go +++ b/test/integration/pause_test.go @@ -32,7 +32,7 @@ func TestPause(t *testing.T) { type validateFunc func(context.Context, *testing.T, string) profile := UniqueProfileName("pause") ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) - defer CleanupWithLogs(t, profile, cancel) + defer Cleanup(t, profile, cancel) // Serial tests t.Run("serial", func(t *testing.T) { @@ -52,12 +52,17 @@ func TestPause(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { tc.validator(ctx, t, profile) + if t.Failed() && *postMortemLogs { + PostMortemLogs(t, profile) + } }) } }) } func validateFreshStart(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + args := append([]string{"start", "-p", profile, "--memory=1800", "--install-addons=false", "--wait=all"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { @@ -67,23 +72,25 @@ func validateFreshStart(ctx context.Context, t *testing.T, profile string) { // validateStartNoReconfigure validates that starting a running cluster does not invoke reconfiguration func validateStartNoReconfigure(ctx context.Context, t *testing.T, profile string) { - args := []string{"start", "-p", profile, "--alsologtostderr", "-v=5"} + defer PostMortemLogs(t, profile) + + args := []string{"start", "-p", profile, "--alsologtostderr", "-v=1"} rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - defer clusterLogs(t, profile) t.Fatalf("failed to second start a running minikube with args: %q : %v", rr.Command(), err) } if !NoneDriver() { softLog := "The running cluster does not require reconfiguration" if !strings.Contains(rr.Output(), softLog) { - defer clusterLogs(t, profile) t.Errorf("expected the second start log output to include %q but got: %s", softLog, rr.Output()) } } } func validatePause(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + args := []string{"pause", "-p", profile, "--alsologtostderr", "-v=5"} rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { @@ -92,6 +99,8 @@ func validatePause(ctx context.Context, t *testing.T, profile string) { } func validateUnpause(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + args := []string{"unpause", "-p", profile, "--alsologtostderr", "-v=5"} rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { @@ -100,6 +109,8 @@ func validateUnpause(ctx context.Context, t *testing.T, profile string) { } func validateDelete(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + args := []string{"delete", "-p", profile, "--alsologtostderr", "-v=5"} rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { @@ -109,6 
+120,8 @@ func validateDelete(ctx context.Context, t *testing.T, profile string) { // make sure no left over left after deleting a profile such as containers or volumes func validateVerifyDeleted(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { t.Errorf("failed to list profiles with json format after it was deleted. args %q: %v", rr.Command(), err) diff --git a/test/integration/preload_test.go b/test/integration/preload_test.go index 1763eee3a8e2..9364b0017154 100644 --- a/test/integration/preload_test.go +++ b/test/integration/preload_test.go @@ -35,7 +35,7 @@ func TestPreload(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true", "--preload=false"} + startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "--wait=true", "--preload=false"} startArgs = append(startArgs, StartArgs()...) k8sVersion := "v1.17.0" startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) @@ -53,7 +53,7 @@ func TestPreload(t *testing.T) { } // Restart minikube with v1.17.3, which has a preloaded tarball - startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"} + startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1", "--wait=true"} startArgs = append(startArgs, StartArgs()...) 
k8sVersion = "v1.17.3" startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index b6764ea967b5..94dc09b30c9f 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -109,21 +109,21 @@ func TestStartStop(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3")) if err != nil { - t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err) + t.Fatalf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err) } // The none driver never really stops if !NoneDriver() { got := Status(ctx, t, Target(), profile, "Host") if got != state.Stopped.String() { - t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got) + t.Fatalf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got) } } // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err) + t.Fatalf("failed to enable an addon post-stop. 
args %q: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) @@ -145,7 +145,7 @@ func TestStartStop(t *testing.T) { got := Status(ctx, t, Target(), profile, "Host") if got != state.Running.String() { - t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got) + t.Fatalf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got) } if !NoneDriver() { @@ -176,6 +176,8 @@ func TestStartStop(t *testing.T) { // testPodScheduling asserts that this configuration can schedule new pods func testPodScheduling(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + t.Helper() // schedule a pod to assert persistence @@ -211,6 +213,7 @@ func testPodScheduling(ctx context.Context, t *testing.T, profile string) { // testPulledImages asserts that this configuration pulls only expected images func testPulledImages(ctx context.Context, t *testing.T, profile string, version string) { t.Helper() + defer PostMortemLogs(t, profile) rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) if err != nil { @@ -254,6 +257,7 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version // testPause asserts that this configuration can be paused and unpaused func testPause(ctx context.Context, t *testing.T, profile string) { t.Helper() + defer PostMortemLogs(t, profile) rr, err := Run(t, exec.CommandContext(ctx, Target(), "pause", "-p", profile, "--alsologtostderr", "-v=1")) if err != nil { diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index 3f2626a882f5..7a80a70231b9 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -68,7 +68,7 @@ func TestVersionUpgrade(t *testing.T) { // Assert that --iso-url works without a sha checksum, and that we can upgrade from old ISO's // Some day, 
this will break an implicit assumption that a tool is available in the ISO :) oldISO := "https://storage.googleapis.com/minikube/iso/integration-test.iso" - args := append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--iso-url=%s", oldISO), fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--iso-url=%s", oldISO), fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr"}, StartArgs()...) rr := &RunResult{} r := func() error { rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...)) @@ -121,7 +121,7 @@ func TestVersionUpgrade(t *testing.T) { } t.Logf("Attempting to downgrade Kubernetes (should fail)") - args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion)}, StartArgs()...) if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { t.Fatalf("downgrading Kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command()) }