allow minikube status to display for one node #8238

Merged (6 commits, May 21, 2020)
2 changes: 0 additions & 2 deletions .travis.yml
@@ -33,8 +33,6 @@ matrix:
script: make
after_success:
- bash <(curl -s https://codecov.io/bash)
travisBuddy:
regex: (FAIL:|\.go:\d+:|^panic:|failed$)
notifications:
webhooks:
urls:
2 changes: 1 addition & 1 deletion cmd/minikube/cmd/node_start.go
@@ -40,7 +40,7 @@ var nodeStartCmd = &cobra.Command{
api, cc := mustload.Partial(ClusterFlagValue())
name := args[0]

n, _, err := node.Retrieve(cc, name)
n, _, err := node.Retrieve(*cc, name)
if err != nil {
exit.WithError("retrieving node", err)
}
2 changes: 1 addition & 1 deletion cmd/minikube/cmd/node_stop.go
@@ -38,7 +38,7 @@ var nodeStopCmd = &cobra.Command{
name := args[0]
api, cc := mustload.Partial(ClusterFlagValue())

n, _, err := node.Retrieve(cc, name)
n, _, err := node.Retrieve(*cc, name)
if err != nil {
exit.WithError("retrieving node", err)
}
2 changes: 1 addition & 1 deletion cmd/minikube/cmd/ssh.go
@@ -52,7 +52,7 @@ var sshCmd = &cobra.Command{
if nodeName == "" {
n = co.CP.Node
} else {
n, _, err = node.Retrieve(co.Config, nodeName)
n, _, err = node.Retrieve(*co.Config, nodeName)
if err != nil {
exit.WithCodeT(exit.Unavailable, "Node {{.nodeName}} does not exist.", out.V{"nodeName": nodeName})
}
32 changes: 24 additions & 8 deletions cmd/minikube/cmd/status.go
@@ -37,6 +37,7 @@ import (
"k8s.io/minikube/pkg/minikube/kubeconfig"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/mustload"
"k8s.io/minikube/pkg/minikube/node"
)

var statusFormat string
@@ -105,19 +106,33 @@ var statusCmd = &cobra.Command{
api, cc := mustload.Partial(cname)

var statuses []*Status
for _, n := range cc.Nodes {
glog.Infof("checking status of %s ...", n.Name)
machineName := driver.MachineName(*cc, n)
st, err := status(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)

if nodeName != "" || statusFormat != defaultStatusFormat && len(cc.Nodes) > 1 {
n, _, err := node.Retrieve(*cc, nodeName)
if err != nil {
glog.Errorf("status error: %v", err)
exit.WithError("retrieving node", err)
}
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)

st, err := status(api, *cc, *n)
if err != nil {
glog.Errorf("status error: %v", err)
}
statuses = append(statuses, st)
} else {
for _, n := range cc.Nodes {
glog.Infof("checking status of %s ...", n.Name)
machineName := driver.MachineName(*cc, n)
st, err := status(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)

if err != nil {
glog.Errorf("status error: %v", err)
}
if st.Host == Nonexistent {
glog.Errorf("The %q host does not exist!", machineName)
}
statuses = append(statuses, st)
}
}

switch strings.ToLower(output) {
@@ -253,6 +268,7 @@ func init() {
For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status`)
statusCmd.Flags().StringVarP(&output, "output", "o", "text",
`minikube status --output OUTPUT. json, text`)
statusCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes.")
}

func statusText(st *Status, w io.Writer) error {
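The heart of the change is the new guard in `statusCmd`: if a node is named with `--node`, or a custom `--format` is used against a multi-node cluster, only a single node (the named one, or the control plane by default) is reported; otherwise the existing loop over `cc.Nodes` runs as before. Since Go's `&&` binds tighter than `||`, the guard parses as `nodeName != "" || (statusFormat != defaultStatusFormat && len(cc.Nodes) > 1)`. Below is a minimal, self-contained sketch of that branch selection; the names and values are placeholders, not minikube code.

```go
package main

import "fmt"

// singleNodeStatus mirrors the shape of the new guard in status.go:
// && binds tighter than ||, so a custom format alone only narrows the
// output when the cluster actually has more than one node.
func singleNodeStatus(nodeName, format, defaultFormat string, nodeCount int) bool {
	return nodeName != "" || format != defaultFormat && nodeCount > 1
}

func main() {
	fmt.Println(singleNodeStatus("m02", "default-template", "default-template", 1)) // true: node named explicitly
	fmt.Println(singleNodeStatus("", "{{.Host}}", "default-template", 1))           // false: custom format, single node
	fmt.Println(singleNodeStatus("", "{{.Host}}", "default-template", 3))           // true: custom format, multi-node
}
```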
12 changes: 10 additions & 2 deletions pkg/minikube/node/node.go
@@ -19,6 +19,7 @@ package node
import (
"fmt"

"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/viper"

@@ -59,7 +60,7 @@ func Add(cc *config.ClusterConfig, n config.Node) error {

// Delete stops and deletes the given node from the given cluster
func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
n, index, err := Retrieve(&cc, name)
n, index, err := Retrieve(cc, name)
if err != nil {
return n, errors.Wrap(err, "retrieve")
}
@@ -79,11 +80,18 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
}

// Retrieve finds the node by name in the given cluster
func Retrieve(cc *config.ClusterConfig, name string) (*config.Node, int, error) {
func Retrieve(cc config.ClusterConfig, name string) (*config.Node, int, error) {

for i, n := range cc.Nodes {
if n.Name == name {
return &n, i, nil
}

// Accept full machine name as well as just node name
if driver.MachineName(cc, n) == name {
glog.Infof("Couldn't find node name %s, but found it as a machine name, returning it anyway.", name)
return &n, i, nil
}
}

return nil, -1, errors.New("Could not find node " + name)
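`Retrieve` now takes the cluster config by value, which is why the call sites above switched from `cc` to `*cc`, and it falls back to matching the full machine name, so a lookup by either the node name or the machine name resolves the same node. The following is a rough standalone sketch of that lookup, with stand-in types for `config.Node` and `config.ClusterConfig` and a simplified stand-in for `driver.MachineName` (the `<profile>-<node>` convention is the one the integration tests below rely on); it is illustrative only, not the PR's code.

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for config.Node and config.ClusterConfig, just for this sketch.
type Node struct{ Name string }
type ClusterConfig struct {
	Name  string // profile name, e.g. "minikube"
	Nodes []Node
}

// machineName is a simplified stand-in for driver.MachineName, using the
// "<profile>-<node>" convention seen in the integration tests.
func machineName(cc ClusterConfig, n Node) string {
	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

// retrieve sketches the updated lookup: match the bare node name first,
// then fall back to the full machine name.
func retrieve(cc ClusterConfig, name string) (*Node, int, error) {
	for i, n := range cc.Nodes {
		if n.Name == name || machineName(cc, n) == name {
			return &cc.Nodes[i], i, nil
		}
	}
	return nil, -1, errors.New("Could not find node " + name)
}

func main() {
	cc := ClusterConfig{Name: "minikube", Nodes: []Node{{Name: "m01"}, {Name: "m02"}}}
	for _, query := range []string{"m02", "minikube-m02", "m09"} {
		n, i, err := retrieve(cc, query)
		fmt.Println(query, "->", n, i, err)
	}
}
```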
1 change: 1 addition & 0 deletions site/content/en/docs/commands/status.md
@@ -26,6 +26,7 @@ minikube status [flags]
-f, --format string Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/
For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "{{.Name}}\ntype: Control Plane\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\n\n")
-h, --help help for status
-n, --node string The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes.
-o, --output string minikube status --output OUTPUT. json, text (default "text")
```

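As a usage note (not part of the PR): from Go, the documented flags can be exercised the same way the integration helpers below do, by shelling out to the binary. This snippet is only a sketch; the profile name is a placeholder.

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	// Ask for a single field of a single node's status, combining the
	// existing --format flag with the new -n flag. "multinode-demo" is a
	// made-up profile name.
	cmd := exec.Command("minikube", "status", "-p", "multinode-demo",
		"-n", "m02", "--format", "{{.Host}}")
	out, err := cmd.CombinedOutput()
	if err != nil {
		// A non-zero exit may simply mean the node or component is stopped.
		log.Printf("status returned an error: %v (may be ok)", err)
	}
	fmt.Println(strings.TrimSpace(string(out))) // e.g. "Running" or "Stopped"
}
```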
109 changes: 63 additions & 46 deletions test/integration/helpers.go
@@ -191,66 +191,83 @@ func CleanupWithLogs(t *testing.T, profile string, cancel context.CancelFunc) {
}

// PostMortemLogs shows logs for debugging a failed cluster
func PostMortemLogs(t *testing.T, profile string) {
func PostMortemLogs(t *testing.T, profile string, multinode ...bool) {
if !t.Failed() {
return
}

if !*postMortemLogs {
t.Logf("post-mortem logs disabled, oh-well!")
t.Logf("post-mortem logs disabled, oh well!")
return
}

m := false
if len(multinode) > 0 {
m = multinode[0]
}

nodes := []string{profile}
if m {
nodes = append(nodes, SecondNodeName, ThirdNodeName)
}

t.Logf("-----------------------post-mortem--------------------------------")

if DockerDriver() {
t.Logf("======> post-mortem[%s]: docker inspect <======", t.Name())
rr, err := Run(t, exec.Command("docker", "inspect", profile))
if err != nil {
t.Logf("failed to get docker inspect: %v", err)
} else {
t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output())
for _, n := range nodes {
machine := profile
if n != profile {
machine = fmt.Sprintf("%s-%s", profile, n)
}
if DockerDriver() {
t.Logf("======> post-mortem[%s]: docker inspect <======", t.Name())
rr, err := Run(t, exec.Command("docker", "inspect", machine))
if err != nil {
t.Logf("failed to get docker inspect: %v", err)
} else {
t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output())
}
}
}

st := Status(context.Background(), t, Target(), profile, "Host")
if st != state.Running.String() {
t.Logf("%q host is not running, skipping log retrieval (state=%q)", profile, st)
return
}
t.Logf("<<< %s FAILED: start of post-mortem logs <<<", t.Name())
t.Logf("======> post-mortem[%s]: minikube logs <======", t.Name())
st := Status(context.Background(), t, Target(), profile, "Host", n)
if st != state.Running.String() {
t.Logf("%q host is not running, skipping log retrieval (state=%q)", profile, st)
return
}
t.Logf("<<< %s FAILED: start of post-mortem logs <<<", t.Name())
t.Logf("======> post-mortem[%s]: minikube logs <======", t.Name())

rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "-n", "25"))
if err != nil {
t.Logf("failed logs error: %v", err)
return
}
t.Logf("%s logs: %s", t.Name(), rr.Output())
rr, err := Run(t, exec.Command(Target(), "-p", profile, "logs", "-n", "25"))
if err != nil {
t.Logf("failed logs error: %v", err)
return
}
t.Logf("%s logs: %s", t.Name(), rr.Output())

st = Status(context.Background(), t, Target(), profile, "APIServer")
if st != state.Running.String() {
t.Logf("%q apiserver is not running, skipping kubectl commands (state=%q)", profile, st)
return
}
st = Status(context.Background(), t, Target(), profile, "APIServer", n)
if st != state.Running.String() {
t.Logf("%q apiserver is not running, skipping kubectl commands (state=%q)", profile, st)
return
}

// Get non-running pods. NOTE: This does not yet contain pods which are "running", but not "ready"
rr, rerr := Run(t, exec.Command("kubectl", "--context", profile, "get", "po", "-o=jsonpath={.items[*].metadata.name}", "-A", "--field-selector=status.phase!=Running"))
if rerr != nil {
t.Logf("%s: %v", rr.Command(), rerr)
return
}
notRunning := strings.Split(rr.Stdout.String(), " ")
t.Logf("non-running pods: %s", strings.Join(notRunning, " "))
// Get non-running pods. NOTE: This does not yet contain pods which are "running", but not "ready"
rr, rerr := Run(t, exec.Command("kubectl", "--context", profile, "get", "po", "-o=jsonpath={.items[*].metadata.name}", "-A", "--field-selector=status.phase!=Running"))
if rerr != nil {
t.Logf("%s: %v", rr.Command(), rerr)
return
}
notRunning := strings.Split(rr.Stdout.String(), " ")
t.Logf("non-running pods: %s", strings.Join(notRunning, " "))

t.Logf("======> post-mortem[%s]: describe non-running pods <======", t.Name())
t.Logf("======> post-mortem[%s]: describe non-running pods <======", t.Name())

args := append([]string{"--context", profile, "describe", "pod"}, notRunning...)
rr, rerr = Run(t, exec.Command("kubectl", args...))
if rerr != nil {
t.Logf("%s: %v", rr.Command(), rerr)
return
args := append([]string{"--context", profile, "describe", "pod"}, notRunning...)
rr, rerr = Run(t, exec.Command("kubectl", args...))
if rerr != nil {
t.Logf("%s: %v", rr.Command(), rerr)
return
}
t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output())
}
t.Logf("(dbg) %s:\n%s", rr.Command(), rr.Output())

t.Logf("<<< %s FAILED: end of post-mortem logs <<<", t.Name())
t.Logf("---------------------/post-mortem---------------------------------")
@@ -355,10 +372,10 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
}

// Status returns a minikube component status as a string
func Status(ctx context.Context, t *testing.T, path string, profile string, key string) string {
func Status(ctx context.Context, t *testing.T, path string, profile string, key string, node string) string {
t.Helper()
// Reminder of useful keys: "Host", "Kubelet", "APIServer"
rr, err := Run(t, exec.CommandContext(ctx, path, "status", fmt.Sprintf("--format={{.%s}}", key), "-p", profile))
rr, err := Run(t, exec.CommandContext(ctx, path, "status", fmt.Sprintf("--format={{.%s}}", key), "-p", profile, "-n", node))
if err != nil {
t.Logf("status error: %v (may be ok)", err)
}
@@ -368,7 +385,7 @@ func Status(ctx context.Context, t *testing.T, path string, profile string, key
// showPodLogs logs debug info for pods
func showPodLogs(ctx context.Context, t *testing.T, profile string, ns string, names []string) {
t.Helper()
st := Status(context.Background(), t, Target(), profile, "APIServer")
st := Status(context.Background(), t, Target(), profile, "APIServer", profile)
if st != state.Running.String() {
t.Logf("%q apiserver is not running, skipping kubectl commands (state=%q)", profile, st)
return
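Taken together, the helper changes let a test target one node: `Status` gains a node argument that is passed through as `-n`, and `PostMortemLogs` accepts an optional multinode flag so a failed multi-node test dumps logs for each machine. Here is a hypothetical sketch of a test in this package, assuming the suite's usual helpers (`UniqueProfileName`, `Minutes`, `Target`, `CleanupWithLogs`) and the node-name constants added in main.go below; it is not part of the PR.

```go
package integration

import (
	"context"
	"testing"

	"github.com/docker/machine/libmachine/state"
)

// TestMultiNodeStatusSketch is illustrative only: it assumes a multi-node
// cluster has already been started for this profile.
func TestMultiNodeStatusSketch(t *testing.T) {
	profile := UniqueProfileName("multinode-sketch")
	ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
	defer CleanupWithLogs(t, profile, cancel)
	defer PostMortemLogs(t, profile, true) // true: also dump m02/m03 logs on failure

	// ... start the cluster and add nodes here ...

	// Check one component of one node via the updated Status helper.
	if st := Status(ctx, t, Target(), profile, "Host", SecondNodeName); st != state.Running.String() {
		t.Errorf("expected %s host to be Running, got %q", SecondNodeName, st)
	}
}
```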
6 changes: 6 additions & 0 deletions test/integration/main.go
@@ -40,6 +40,12 @@ var timeOutMultiplier = flag.Float64("timeout-multiplier", 1, "multiply the time
var binaryPath = flag.String("binary", "../../out/minikube", "path to minikube binary")
var testdataDir = flag.String("testdata-dir", "testdata", "the directory relative to test/integration where the testdata lives")

// Node names are consistent, let's store these for easy access later
const (
SecondNodeName = "m02"
ThirdNodeName = "m03"
)

// TestMain is the test main
func TestMain(m *testing.M) {
flag.Parse()
16 changes: 5 additions & 11 deletions test/integration/multinode_test.go
@@ -50,6 +50,7 @@ func TestMultiNode(t *testing.T) {
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
defer PostMortemLogs(t, profile)
tc.validator(ctx, t, profile)
})
}
@@ -104,11 +105,8 @@ func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile strin
}

func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
// Names are autogenerated using the node.Name() function
name := "m03"

// Run minikube node stop on that node
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", ThirdNodeName))
if err != nil {
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
}
@@ -143,11 +141,8 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin
// TODO (#7496): remove skip once restarts work
t.Skip("Restarting nodes is broken :(")

// Grab the stopped node
name := "m03"

// Start the node back up
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", ThirdNodeName))
if err != nil {
t.Errorf("node start returned an error. args %q: %v", rr.Command(), err)
}
@@ -168,10 +163,9 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin
}

func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) {
name := "m03"

// Start the node back up
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", ThirdNodeName))
if err != nil {
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
}
@@ -195,7 +189,7 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
if err != nil {
t.Errorf("failed to run %q : %v", rr.Command(), err)
}
if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s-%s", profile, name)) {
if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s-%s", profile, ThirdNodeName)) {
t.Errorf("docker volume was not properly deleted: %s", rr.Stdout.String())
}
}