Skip to content

Commit

Permalink
Move status command to TestFunctional/serial
Browse files · Browse the repository at this point in the history
I noticed that TestFunctional/parallel/ComponentHealth was failing with this error:

```
Error apiserver status: https://172.17.0.3:8441/healthz returned error 500:
[-]etcd failed: reason withheld
```

but by the time post mortem logs were printed the etcd container was up and running.

I think this test occasionally fails because apiserver healthz is not yet returning a 200 status when we run the test. We wait for healthz to return 200 on regular start, but not on soft start, which we run in `TestFunctional`.

This PR adds a retry, which should give the apiserver time to become healthy.
  • Loading branch information
Priya Wadhwa committed May 19, 2020
1 parent bf8c4e3 commit cc40795
Showing 1 changed file with 50 additions and 39 deletions.
89 changes: 50 additions & 39 deletions test/integration/functional_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ import (

"github.com/google/go-cmp/cmp"

"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/util/retry"
Expand Down Expand Up @@ -83,7 +84,7 @@ func TestFunctional(t *testing.T) {
{"StartWithProxy", validateStartWithProxy}, // Set everything else up for success
{"SoftStart", validateSoftStart}, // do a soft start. ensure config didnt change.
{"KubeContext", validateKubeContext}, // Racy: must come immediately after "minikube start"
{"KubectlGetPods", validateKubectlGetPods}, // Make sure kubectl is returning pods
{"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up
{"CacheCmd", validateCacheCmd}, // Caches images needed for subsequent tests because of proxy
{"MinikubeKubectlCmd", validateMinikubeKubectl}, // Make sure `minikube kubectl` works
}
Expand All @@ -105,25 +106,25 @@ func TestFunctional(t *testing.T) {
validator validateFunc
}{
{"ComponentHealth", validateComponentHealth},
{"ConfigCmd", validateConfigCmd},
{"DashboardCmd", validateDashboardCmd},
{"DNS", validateDNS},
{"DryRun", validateDryRun},
{"StatusCmd", validateStatusCmd},
{"LogsCmd", validateLogsCmd},
{"MountCmd", validateMountCmd},
{"ProfileCmd", validateProfileCmd},
{"ServiceCmd", validateServiceCmd},
{"AddonsCmd", validateAddonsCmd},
{"PersistentVolumeClaim", validatePersistentVolumeClaim},
{"TunnelCmd", validateTunnelCmd},
{"SSHCmd", validateSSHCmd},
{"MySQL", validateMySQL},
{"FileSync", validateFileSync},
{"CertSync", validateCertSync},
{"UpdateContextCmd", validateUpdateContextCmd},
{"DockerEnv", validateDockerEnv},
{"NodeLabels", validateNodeLabels},
// {"ConfigCmd", validateConfigCmd},
// {"DashboardCmd", validateDashboardCmd},
// {"DNS", validateDNS},
// {"DryRun", validateDryRun},
// {"StatusCmd", validateStatusCmd},
// {"LogsCmd", validateLogsCmd},
// {"MountCmd", validateMountCmd},
// {"ProfileCmd", validateProfileCmd},
// {"ServiceCmd", validateServiceCmd},
// {"AddonsCmd", validateAddonsCmd},
// {"PersistentVolumeClaim", validatePersistentVolumeClaim},
// {"TunnelCmd", validateTunnelCmd},
// {"SSHCmd", validateSSHCmd},
// {"MySQL", validateMySQL},
// {"FileSync", validateFileSync},
// {"CertSync", validateCertSync},
// {"UpdateContextCmd", validateUpdateContextCmd},
// {"DockerEnv", validateDockerEnv},
// {"NodeLabels", validateNodeLabels},
}
for _, tc := range tests {
tc := tc
Expand Down Expand Up @@ -229,7 +230,7 @@ func validateSoftStart(ctx context.Context, t *testing.T, profile string) {
t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.KubernetesConfig.NodePort)
}

softStartArgs := []string{"start", "-p", profile, "--wait=all"}
softStartArgs := []string{"start", "-p", profile}
c := exec.CommandContext(ctx, Target(), softStartArgs...)
rr, err := Run(t, c)
if err != nil {
Expand Down Expand Up @@ -293,27 +294,37 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string)
func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)

rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
if err != nil {
t.Fatalf("failed to get components. args %q: %v", rr.Command(), err)
}
cs := api.ComponentStatusList{}
d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
if err := d.Decode(&cs); err != nil {
t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
}
f := func() (bool, error) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
if err != nil {
t.Logf("failed to get components. args %q: %v", rr.Command(), err)
return false, nil
}
cs := api.ComponentStatusList{}
d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
if err := d.Decode(&cs); err != nil {
t.Logf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
return false, nil
}

for _, i := range cs.Items {
status := api.ConditionFalse
for _, c := range i.Conditions {
if c.Type != api.ComponentHealthy {
continue
for _, i := range cs.Items {
status := api.ConditionFalse
for _, c := range i.Conditions {
if c.Type != api.ComponentHealthy {
continue
}
status = c.Status
}
if status != api.ConditionTrue {
t.Logf("unexpected status: %v - item: %+v", status, i)
return false, nil
}
status = c.Status
}
if status != api.ConditionTrue {
t.Errorf("unexpected status: %v - item: %+v", status, i)
}
return true, nil
}

if err := wait.PollImmediate(10*time.Second, 40*time.Second, f); err != nil {
t.Fatalf("error: %v", err)
}
}

Expand Down

0 comments on commit cc40795

Please sign in to comment.