From 94a9c03b91bf15bbd97b82dbac6e3f62c3062687 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 30 Apr 2020 16:02:43 -0700 Subject: [PATCH 01/27] initial commit for restartWorker --- cmd/minikube/cmd/status.go | 2 +- pkg/minikube/bootstrapper/bootstrapper.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 40 +++++++++++++++----- pkg/minikube/driver/endpoint.go | 4 +- pkg/minikube/mustload/mustload.go | 2 +- pkg/minikube/node/start.go | 6 +-- 6 files changed, 39 insertions(+), 17 deletions(-) diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 46d19de015f3..a49817abd428 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -222,7 +222,7 @@ func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status return st, nil } - hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &n, host.DriverName) + hostname, _, port, err := driver.ControlPlaneEndpoint(&cc, &n, host.DriverName) if err != nil { glog.Errorf("forwarded endpoint: %v", err) st.Kubeconfig = Misconfigured diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 0019b1750d5a..96b460412a70 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -39,7 +39,7 @@ type Bootstrapper interface { UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error WaitForNode(config.ClusterConfig, config.Node, time.Duration) error - JoinCluster(config.ClusterConfig, config.Node, string) error + JoinCluster(config.ClusterConfig, config.Node, string, bool) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error GenerateToken(config.ClusterConfig) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
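The ControlPaneEndpoint -> ControlPlaneEndpoint rename above ripples through every caller because this helper is the single place that decides whether a cluster is reached via a forwarded localhost port (docker/podman drivers) or the node IP directly. A minimal sketch of the usual call pattern, mirroring the apiServerURL helper touched later in this patch; the https URL construction here is illustrative rather than copied verbatim:

    // Resolve the externally reachable address of the API server.
    hostname, _, port, err := driver.ControlPlaneEndpoint(&cc, &n, h.DriverName)
    if err != nil {
        return "", err
    }
    // JoinHostPort brackets IPv6 addresses correctly; port arrives as an int.
    return fmt.Sprintf("https://%s", net.JoinHostPort(hostname, strconv.Itoa(port))), nil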
diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 1c4853dab058..3cf5223860ac 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -288,7 +288,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { if err := bsutil.ExistingConfig(k.c); err == nil { glog.Infof("found existing configuration files, will attempt cluster restart") - rerr := k.restartCluster(cfg) + rerr := k.restartControlPlane(cfg) if rerr == nil { return nil } @@ -351,7 +351,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time out.T(out.HealthCheck, "Verifying Kubernetes components...") // TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT - hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &n, cfg.Driver) + hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &n, cfg.Driver) if err != nil { return errors.Wrap(err, "get control plane endpoint") } @@ -468,7 +468,7 @@ func (k *Bootstrapper) needsReset(conf string, hostname string, port int, client } // restartCluster restarts the Kubernetes cluster configured by kubeadm -func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { +func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { glog.Infof("restartCluster start") start := time.Now() @@ -497,7 +497,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "primary control plane") } - hostname, _, port, err := driver.ControlPaneEndpoint(&cfg, &cp, cfg.Driver) + hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &cp, cfg.Driver) if err != nil { return errors.Wrap(err, "control plane") } @@ -578,15 +578,18 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { } // JoinCluster adds a node to an existing cluster -func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error { +func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string, preExists bool) error { start := time.Now() glog.Infof("JoinCluster: %+v", cc) defer func() { glog.Infof("JoinCluster complete in %s", time.Since(start)) }() + if preExists { + return k.restartWorker(cc, joinCmd) + } // Join the master by specifying its token - joinCmd = fmt.Sprintf("%s --v=10 --node-name=%s", joinCmd, driver.MachineName(cc, n)) + joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, driver.MachineName(cc, n)) out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) if err != nil { return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) @@ -599,8 +602,27 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC return nil } -// GenerateToken creates a token and returns the appropriate kubeadm join command to run -func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { +func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, token string) error { + cp, err := config.PrimaryControlPlane(&cc) + if err != nil { + return errors.Wrap(err, "getting primary control plane") + } + host, _, port, err := driver.ControlPlaneEndpoint(&cc, &cp, cc.Driver) + if err != nil { + return errors.Wrap(err, "getting control plane endpoint") + } + + cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(host, strconv.Itoa(port)), token) + 
_, err = k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) + if err != nil { + return errors.Wrap(err, "running join phase kubelet-start") + } + return nil +} + +// GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token +func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig) (string, error) { + // If we're starting a new node, create a new token and return the full join command tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) r, err := k.c.RunCmd(tokenCmd) if err != nil { @@ -610,7 +632,7 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { joinCmd := r.Stdout.String() joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) - + fmt.Println(joinCmd) return joinCmd, nil } diff --git a/pkg/minikube/driver/endpoint.go b/pkg/minikube/driver/endpoint.go index dc9507a4d9d5..f4a7ccfdf502 100644 --- a/pkg/minikube/driver/endpoint.go +++ b/pkg/minikube/driver/endpoint.go @@ -24,8 +24,8 @@ import ( "k8s.io/minikube/pkg/minikube/constants" ) -// ControlPaneEndpoint returns the location where callers can reach this cluster -func ControlPaneEndpoint(cc *config.ClusterConfig, cp *config.Node, driverName string) (string, net.IP, int, error) { +// ControlPlaneEndpoint returns the location where callers can reach this cluster +func ControlPlaneEndpoint(cc *config.ClusterConfig, cp *config.Node, driverName string) (string, net.IP, int, error) { if NeedsPortForward(driverName) { port, err := oci.ForwardedPort(cc.Driver, cc.Name, cp.Port) hostname := oci.DefaultBindIPV4 diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index 0910ba35fc9f..66f1421d3689 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -119,7 +119,7 @@ func Running(name string) ClusterController { exit.WithError("Unable to get command runner", err) } - hostname, ip, port, err := driver.ControlPaneEndpoint(cc, &cp, host.DriverName) + hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName) if err != nil { exit.WithError("Unable to get forwarded endpoint", err) } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index d4c8a1e7f77e..3336e47fb724 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -169,12 +169,12 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "getting control plane bootstrapper") } - joinCmd, err := cpBs.GenerateToken(*starter.Cfg) + joinCmd, err := cpBs.GenerateToken(*starter.Cfg, starter.PreExists) if err != nil { return nil, errors.Wrap(err, "generating join token") } - if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { + if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd, starter.PreExists); err != nil { return nil, errors.Wrap(err, "joining cluster") } } @@ -307,7 +307,7 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu } func apiServerURL(h host.Host, cc config.ClusterConfig, n config.Node) (string, error) { - hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &n, h.DriverName) + hostname, _, port, err := driver.ControlPlaneEndpoint(&cc, &n, h.DriverName) if err != nil { return "", err } From 
76349e19b20b73eb533f01549ddd2974f0bc6cda Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 1 May 2020 17:02:11 -0700 Subject: [PATCH 02/27] more changes --- cmd/minikube/cmd/node_add.go | 15 ++++++- cmd/minikube/cmd/start.go | 8 +++- cmd/minikube/cmd/status.go | 2 +- pkg/minikube/bootstrapper/bootstrapper.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 46 ++++++++++++++++---- pkg/minikube/config/types.go | 1 + pkg/minikube/machine/fix.go | 4 +- pkg/minikube/node/node.go | 8 ++-- pkg/minikube/node/start.go | 2 +- 9 files changed, 66 insertions(+), 22 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index d47d54cce1de..2bdada7f08b0 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -17,8 +17,11 @@ limitations under the License. package cmd import ( + "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" + cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" + "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" @@ -60,7 +63,8 @@ var nodeAddCmd = &cobra.Command{ cc.Memory = 2200 } - if err := node.Add(cc, n); err != nil { + starter, err := node.Add(cc, n) + if err != nil { _, err := maybeDeleteAndRetry(*cc, n, nil, err) if err != nil { exit.WithError("failed to add node", err) @@ -68,11 +72,18 @@ var nodeAddCmd = &cobra.Command{ } // Add CNI config if it's not already there - // We need to run kubeadm.init here as well if err := config.MultiNodeCNIConfig(cc); err != nil { exit.WithError("failed to save config", err) } + // Restart the control plane to pick up the new CNI + bs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, cc, cmdcfg.Bootstrapper) + if err != nil { + glog.Warningf("failed to get control plane bootstrapper: %v", err) + } else { + bs.StartCluster(*cc) + } + out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name}) }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 7a0faac6f3ba..214ec57d7340 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -278,7 +278,11 @@ func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kub } numNodes := viper.GetInt(nodes) - if numNodes == 1 && existing != nil { + if existing != nil { + if numNodes > 1 { + // We ignore the --nodes parameter if we're restarting an existing cluster + out.WarningT(`The cluster {{.cluster}} already exists which means the --nodes parameter will be ignored. 
Use "minikube node add" to add nodes to an existing cluster.`, out.V{"cluster": existing.Name}) + } numNodes = len(existing.Nodes) } if numNodes > 1 { @@ -294,7 +298,7 @@ func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kub KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, } out.Ln("") // extra newline for clarity on the command line - err := node.Add(starter.Cfg, n) + _, err := node.Add(starter.Cfg, n) if err != nil { return nil, errors.Wrap(err, "adding node") } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index a49817abd428..d37b46037625 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -217,7 +217,7 @@ func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status glog.Infof("%s kubelet status = %s", name, stk) st.Kubelet = stk.String() - // Early exit for regular nodes + // Early exit for worker nodes if !controlPlane { return st, nil } diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 96b460412a70..b592dd559827 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -41,7 +41,7 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string, bool) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - GenerateToken(config.ClusterConfig) (string, error) + GenerateToken(*config.ClusterConfig, *config.Node) (string, error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(config.ClusterConfig, LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 3cf5223860ac..dfc7ee0ca13a 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -586,7 +586,7 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC }() if preExists { - return k.restartWorker(cc, joinCmd) + return k.restartWorker(cc, n) } // Join the master by specifying its token joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, driver.MachineName(cc, n)) @@ -602,7 +602,16 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC return nil } -func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, token string) error { +func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) error { + + if kverify.KubeletStatus(k.c) == state.Running { + return nil + } + + if err := k.clearStaleConfigs(cc); err != nil { + return errors.Wrap(err, "clearing stale configs") + } + cp, err := config.PrimaryControlPlane(&cc) if err != nil { return errors.Wrap(err, "getting primary control plane") @@ -612,7 +621,8 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, token string) erro return errors.Wrap(err, "getting control plane endpoint") } - cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(host, strconv.Itoa(port)), token) + // Make sure to account for if n.Token doesn't exist for older configs + cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s --discovery-token-unsafe-skip-ca-verification", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(host, strconv.Itoa(port)), 
n.Token) _, err = k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) if err != nil { return errors.Wrap(err, "running join phase kubelet-start") @@ -621,18 +631,32 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, token string) erro } // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token -func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig) (string, error) { - // If we're starting a new node, create a new token and return the full join command - tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) - r, err := k.c.RunCmd(tokenCmd) +func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig, n *config.Node) (string, error) { + // Generate the token so we can store it + genTokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token generate", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) + r, err := k.c.RunCmd(genTokenCmd) if err != nil { return "", errors.Wrap(err, "generating bootstrap token") } + token := strings.TrimSpace(r.Stdout.String()) + n.Token = token + + // Take that generated token and use it to get a kubeadm join command + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create %s --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), token)) + r, err = k.c.RunCmd(tokenCmd) + if err != nil { + return "", errors.Wrap(err, "generating join command") + } joinCmd := r.Stdout.String() joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) - fmt.Println(joinCmd) + + // Save the new token for later use + err = config.SaveNode(cc, n) + if err != nil { + return joinCmd, errors.Wrap(err, "saving node") + } return joinCmd, nil } @@ -746,10 +770,14 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru } files := []assets.CopyableFile{ - assets.NewMemoryAssetTarget(kubeadmCfg, bsutil.KubeadmYamlPath+".new", "0640"), assets.NewMemoryAssetTarget(kubeletCfg, bsutil.KubeletSystemdConfFile+".new", "0644"), assets.NewMemoryAssetTarget(kubeletService, bsutil.KubeletServiceFile+".new", "0644"), } + + if n.ControlPlane { + files = append(files, assets.NewMemoryAssetTarget(kubeadmCfg, bsutil.KubeadmYamlPath+".new", "0640")) + } + // Copy the default CNI config (k8s.conf), so that kubelet can successfully // start a Pod in the case a user hasn't manually installed any CNI plugin // and minikube was started with "--extra-config=kubelet.network-plugin=cni". 
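The reworked GenerateToken above is the core of the worker-restart fix: `kubeadm token generate` only prints a random token string locally, while `kubeadm token create <token> --print-join-command --ttl=0` registers that exact token with the API server (TTL 0 means it never expires, so it stays valid across restarts) and prints the matching join command. Persisting the token on the Node is what later lets restartWorker rerun only the kubelet-start join phase instead of a full join. A condensed sketch of the flow, assuming k.c is the control plane's command.Runner as in the surrounding code:

    kubeadm := bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)
    // 1) Mint a token string locally so it can be stored in the node config.
    r, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", kubeadm+" token generate"))
    if err != nil {
        return "", errors.Wrap(err, "generating bootstrap token")
    }
    n.Token = strings.TrimSpace(r.Stdout.String())
    // 2) Register that same token and have kubeadm print the full join command.
    tokenCmd := fmt.Sprintf("%s token create %s --print-join-command --ttl=0", kubeadm, n.Token)

The new Token field on config.Node that makes this possible is added in the types.go diff just below.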
diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index ab42bca6ea9e..77776b991c41 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -111,6 +111,7 @@ type Node struct { KubernetesVersion string ControlPlane bool Worker bool + Token string } // VersionedExtraOption holds information on flags to apply to a specific range diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 0fbf3183d030..47401380b97a 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -133,13 +133,13 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node if s == state.Running { if !recreated { - out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) } return h, nil } if !recreated { - out.T(out.Restarting, `Restarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + out.T(out.Restarting, `Restarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) } if err := h.Driver.Start(); err != nil { return h, errors.Wrap(err, "driver start") diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index dcc4f4d7d536..dd2a8e85f5a9 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -34,14 +34,14 @@ const ( ) // Add adds a new node config to an existing cluster. 
-func Add(cc *config.ClusterConfig, n config.Node) error { +func Add(cc *config.ClusterConfig, n config.Node) (Starter, error) { if err := config.SaveNode(cc, &n); err != nil { - return errors.Wrap(err, "save node") + return Starter{}, errors.Wrap(err, "save node") } r, p, m, h, err := Provision(cc, &n, false) if err != nil { - return err + return Starter{}, err } s := Starter{ Runner: r, @@ -54,7 +54,7 @@ func Add(cc *config.ClusterConfig, n config.Node) error { } _, err = Start(s, false) - return err + return s, err } // Delete stops and deletes the given node from the given cluster diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 3336e47fb724..b0b9da12c5cc 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -169,7 +169,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "getting control plane bootstrapper") } - joinCmd, err := cpBs.GenerateToken(*starter.Cfg, starter.PreExists) + joinCmd, err := cpBs.GenerateToken(starter.Cfg, starter.Node) if err != nil { return nil, errors.Wrap(err, "generating join token") } From 38ec707f668fc95367dc8576a1e3216574a7d5dd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 1 May 2020 18:04:06 -0700 Subject: [PATCH 03/27] only warn on fresh start --- cmd/minikube/cmd/start.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 583425a69df0..8a087a1358d6 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -289,8 +289,11 @@ func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kub if driver.BareMetal(starter.Cfg.Driver) { exit.WithCodeT(exit.Config, "The none driver is not compatible with multi-node clusters.") } else { - out.Ln("") - warnAboutMultiNode() + // Only warn users on first start. 
+ if existing == nil { + out.Ln("") + warnAboutMultiNode() + } for i := 1; i < numNodes; i++ { nodeName := node.Name(i + 1) n := config.Node{ From daaafd5df4603bf8c0721fb5c685a7f91ba00521 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 4 May 2020 09:26:02 -0700 Subject: [PATCH 04/27] lint --- cmd/minikube/cmd/node_add.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 17dc41b107c9..8ff9a2d31889 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -84,7 +84,10 @@ var nodeAddCmd = &cobra.Command{ if err != nil { glog.Warningf("failed to get control plane bootstrapper: %v", err) } else { - bs.StartCluster(*cc) + err := bs.StartCluster(*cc) + if err != nil { + glog.Warningf("failed to restart cluster: %v", err) + } } out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name}) From 258be2cbde52ed7d74ee15e9700beeb1755ff6a0 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 4 May 2020 10:43:01 -0700 Subject: [PATCH 05/27] re-enable skipped multi node test --- test/integration/multinode_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index a69f86a43b6d..196a84d12d51 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -139,14 +139,11 @@ func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) } func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) { - // TODO (#7496): remove skip once restarts work - t.Skip("Restarting nodes is broken :(") - // Grab the stopped node name := "m03" // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name)) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name, "--alsologtostderr")) if err != nil { t.Errorf("node start returned an error. 
args %q: %v", rr.Command(), err) } From 2ec26658a6c1a1d3d06db621da1c8913ee270d52 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 4 May 2020 15:58:01 -0700 Subject: [PATCH 06/27] check kubelet status explicitly --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 14 ++++++++++++++ test/integration/multinode_test.go | 6 ++++++ 2 files changed, 20 insertions(+) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index dfc7ee0ca13a..2f6251eeae72 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -627,6 +627,20 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) err if err != nil { return errors.Wrap(err, "running join phase kubelet-start") } + + // This can fail during upgrades if the old pods have not shut down yet + kubeletStatus := func() error { + st := kverify.KubeletStatus(k.c) + if st != state.Running { + return errors.New("kubelet not running") + } + return nil + } + if err = retry.Expo(kubeletStatus, 100*time.Microsecond, 30*time.Second); err != nil { + glog.Warningf("kubelet is not ready: %v", err) + return errors.Wrap(err, "kubelet") + } + return nil } diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 196a84d12d51..cdf0fac0b457 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -161,6 +161,12 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) } + + // Make sure kubectl can connect correctly + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to kubectl get nodes. 
args %q : %v", rr.Command(), err) + } } func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) { From 494710616b59b15ea7158894c597c40ffe42406d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 5 May 2020 14:42:07 -0700 Subject: [PATCH 07/27] let's change a bunch of stuff --- cmd/minikube/cmd/ip.go | 4 +++- cmd/minikube/cmd/node_start.go | 14 +++++++++----- cmd/minikube/cmd/stop.go | 9 ++++----- pkg/minikube/bootstrapper/bootstrapper.go | 2 +- pkg/minikube/bootstrapper/certs.go | 4 ++-- pkg/minikube/bootstrapper/certs_test.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 4 ++-- pkg/minikube/node/start.go | 13 ++++++------- 8 files changed, 28 insertions(+), 24 deletions(-) diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index 6a2ca32055a1..40a750cd1c32 100644 --- a/cmd/minikube/cmd/ip.go +++ b/cmd/minikube/cmd/ip.go @@ -29,6 +29,8 @@ var ipCmd = &cobra.Command{ Long: `Retrieves the IP address of the running cluster, and writes it to STDOUT.`, Run: func(cmd *cobra.Command, args []string) { co := mustload.Running(ClusterFlagValue()) - out.Ln(co.CP.IP.String()) + for _, n := range co.Config.Nodes { + out.Ln(n.IP) + } }, } diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 7399e87123d3..7de0cade58a9 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -20,6 +20,7 @@ import ( "os" "github.com/spf13/cobra" + "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/mustload" @@ -39,16 +40,17 @@ var nodeStartCmd = &cobra.Command{ api, cc := mustload.Partial(ClusterFlagValue()) name := args[0] - if machine.IsRunning(api, name) { - out.T(out.Check, "{{.name}} is already running", out.V{"name": name}) - os.Exit(0) - } - n, _, err := node.Retrieve(cc, name) if err != nil { exit.WithError("retrieving node", err) } + machineName := driver.MachineName(*cc, *n) + if machine.IsRunning(api, machineName) { + out.T(out.Check, "{{.name}} is already running", out.V{"name": name}) + os.Exit(0) + } + r, p, m, h, err := node.Provision(cc, n, false) if err != nil { exit.WithError("provisioning host for node", err) @@ -71,6 +73,8 @@ var nodeStartCmd = &cobra.Command{ exit.WithError("failed to start node", err) } } + + out.T(out.Happy, "Successfully started node {{.name}}!", out.V{"name": machineName}) }, } diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index a0b90f878b67..f7590d8ad43f 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -24,7 +24,6 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" - "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/kubeconfig" @@ -51,10 +50,11 @@ func runStop(cmd *cobra.Command, args []string) { defer api.Close() for _, n := range cc.Nodes { - nonexistent := stop(api, *cc, n) + machineName := driver.MachineName(*cc, n) + nonexistent := stop(api, machineName) if !nonexistent { - out.T(out.Stopped, `Node "{{.node_name}}" stopped.`, out.V{"node_name": n.Name}) + out.T(out.Stopped, `Node "{{.node_name}}" stopped.`, out.V{"node_name": machineName}) } } @@ -67,9 +67,8 @@ func runStop(cmd *cobra.Command, args []string) { } } -func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool { +func stop(api libmachine.API, machineName string) bool { nonexistent := false - machineName := 
driver.MachineName(cluster, n) tryStop := func() (err error) { err = machine.StopHost(api, machineName) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index b592dd559827..f7a5c294ef87 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -44,7 +44,7 @@ type Bootstrapper interface { GenerateToken(*config.ClusterConfig, *config.Node) (string, error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(config.ClusterConfig, LogOptions) map[string]string - SetupCerts(config.KubernetesConfig, config.Node) error + SetupCerts(config.KubernetesConfig, config.Node, bool) error GetAPIServerStatus(string, int) (string, error) } diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 5e4dd8e50ffd..4bf7abae0042 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -47,7 +47,7 @@ import ( ) // SetupCerts gets the generated credentials required to talk to the APIServer. -func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ([]assets.CopyableFile, error) { +func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node, keepContext bool) ([]assets.CopyableFile, error) { localPath := localpath.Profile(k8s.ClusterName) glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP) @@ -99,7 +99,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ClientCertificate: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), ClientKey: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), - KeepContext: false, + KeepContext: keepContext, } kubeCfg := api.NewConfig() diff --git a/pkg/minikube/bootstrapper/certs_test.go b/pkg/minikube/bootstrapper/certs_test.go index 4f93aad180e9..72f63883c345 100644 --- a/pkg/minikube/bootstrapper/certs_test.go +++ b/pkg/minikube/bootstrapper/certs_test.go @@ -57,7 +57,7 @@ func TestSetupCerts(t *testing.T) { f := command.NewFakeCommandRunner() f.SetCommandToOutput(expected) - _, err := SetupCerts(f, k8s, config.Node{}) + _, err := SetupCerts(f, k8s, config.Node{}, false) if err != nil { t.Fatalf("Error starting cluster: %v", err) } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 2f6251eeae72..e1a1be187c7b 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -720,8 +720,8 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { } // SetupCerts sets up certificates within the cluster. 
-func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) error { - _, err := bootstrapper.SetupCerts(k.c, k8s, n) +func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node, keepContext bool) error { + _, err := bootstrapper.SetupCerts(k.c, k8s, n, keepContext) return err } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 1fc50b95d4dd..22f2e925166f 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -125,9 +125,10 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "Failed to get bootstrapper") } - if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node); err != nil { + if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node, starter.PreExists); err != nil { return nil, errors.Wrap(err, "setting up certs") } + } var wg sync.WaitGroup @@ -153,12 +154,10 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { prepareNone() } - // TODO: existing cluster should wait for health #7597 - if !starter.PreExists { - if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { - return nil, errors.Wrap(err, "Wait failed") - } + if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil { + return nil, errors.Wrap(err, "Wait failed") } + } else { if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil { return nil, errors.Wrap(err, "Updating node") @@ -277,7 +276,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, exit.WithError("Failed to update cluster", err) } - if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { + if err := bs.SetupCerts(cfg.KubernetesConfig, n, false); err != nil { exit.WithError("Failed to setup certs", err) } From 358f167ae80d82e68534f4c2d49360a0bab93f71 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 7 May 2020 11:21:58 -0700 Subject: [PATCH 08/27] keepContext --- pkg/minikube/node/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 919c83adae5e..a620f765be89 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -125,7 +125,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "Failed to get bootstrapper") } - if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node, starter.PreExists); err != nil { + if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node, true); err != nil { return nil, errors.Wrap(err, "setting up certs") } From ad9b4c1a3824c1725949f73b6fc72c3d20c8b32b Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 8 May 2020 13:53:16 -0700 Subject: [PATCH 09/27] save config --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 ++ pkg/minikube/machine/fix.go | 2 +- pkg/minikube/machine/start.go | 23 +++++++++++++++----- pkg/minikube/node/start.go | 7 ------ 4 files changed, 20 insertions(+), 14 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 3130abc1ef07..2ef8c21928ff 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -295,6 +295,8 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() + fmt.Printf("%+v", cfg) + // Before we start, ensure 
that no paused components are lurking around if err := k.unpause(cfg); err != nil { glog.Warningf("unpause failed: %v", err) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 47401380b97a..6a96f8119754 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -117,7 +117,7 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node glog.Infof("Sleeping 1 second for extra luck!") time.Sleep(1 * time.Second) - h, err = createHost(api, cc, n) + h, err = createHost(api, &cc, &n) if err != nil { return nil, errors.Wrap(err, "recreate") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index e44ad685fd18..65939752fd33 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -81,7 +81,7 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho } if !exists { glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) - h, err := createHost(api, cfg, n) + h, err := createHost(api, &cfg, &n) return h, exists, err } glog.Infoln("Skipping create...Using existing machine configuration") @@ -100,7 +100,7 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options { return &o } -func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) { +func createHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) (*host.Host, error) { glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver) start := time.Now() defer func() { @@ -113,12 +113,12 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*h See https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/ for more information. To disable this message, run [minikube config set ShowDriverDeprecationNotification false]`) } - showHostInfo(cfg) + showHostInfo(*cfg) def := registry.Driver(cfg.Driver) if def.Empty() { return nil, fmt.Errorf("unsupported/missing driver: %s", cfg.Driver) } - dd, err := def.Config(cfg, n) + dd, err := def.Config(*cfg, *n) if err != nil { return nil, errors.Wrap(err, "config") } @@ -134,7 +134,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*h h.HostOptions.AuthOptions.CertDir = localpath.MiniPath() h.HostOptions.AuthOptions.StorePath = localpath.MiniPath() - h.HostOptions.EngineOptions = engineOptions(cfg) + h.HostOptions.EngineOptions = engineOptions(*cfg) cstart := time.Now() glog.Infof("libmachine.API.Create for %q (driver=%q)", cfg.Name, cfg.Driver) @@ -144,13 +144,24 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*h } glog.Infof("duration metric: libmachine.API.Create for %q took %s", cfg.Name, time.Since(cstart)) - if err := postStartSetup(h, cfg); err != nil { + if err := postStartSetup(h, *cfg); err != nil { return h, errors.Wrap(err, "post-start") } if err := api.Save(h); err != nil { return nil, errors.Wrap(err, "save") } + // Save IP to config file for subsequent use + ip, err := h.Driver.GetIP() + fmt.Printf("NEW IP = %s\n", ip) + if err != nil { + return h, err + } + n.IP = ip + err = config.SaveNode(cfg, n) + if err != nil { + return h, err + } return h, nil } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 071927174f16..00868e3492f4 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -344,13 +344,6 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. out.FailureT("Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) } - // Save IP to config file for subsequent use - node.IP = ip - err = config.SaveNode(cfg, node) - if err != nil { - return runner, preExists, m, host, errors.Wrap(err, "saving node") - } - return runner, preExists, m, host, err } From 4886030486a4fc22367c4f454fd2cdeabb333454 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 11 May 2020 11:50:51 -0700 Subject: [PATCH 10/27] pass everything by reference since we actively change node IP --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 -- pkg/minikube/machine/cluster_test.go | 32 ++++++++++---------- pkg/minikube/machine/fix.go | 16 +++++----- pkg/minikube/machine/start.go | 7 ++--- pkg/minikube/node/start.go | 8 ++--- 5 files changed, 30 insertions(+), 35 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 2ef8c21928ff..3130abc1ef07 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -295,8 +295,6 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() - fmt.Printf("%+v", cfg) - // Before we start, ensure that no paused components are lurking around if err := k.unpause(cfg); err != nil { glog.Warningf("unpause failed: %v", err) diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 310f29e2eb47..7028b848ad65 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -81,7 +81,7 @@ func TestCreateHost(t *testing.T) { t.Fatal("Machine already exists.") } - _, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + _, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -122,7 +122,7 @@ func TestStartHostExists(t *testing.T) { api := tests.NewMockAPI(t) // Create an initial host. - ih, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + ih, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -142,7 +142,7 @@ func TestStartHostExists(t *testing.T) { n := config.Node{Name: ih.Name} // This should pass without calling Create because the host exists already. - h, _, err := StartHost(api, mc, n) + h, _, err := StartHost(api, &mc, &n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -161,7 +161,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel) api.NotExistError = true - h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -175,7 +175,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n := config.Node{Name: h.Name} // This should pass with creating host, while machine does not exist. - h, _, err = StartHost(api, mc, n) + h, _, err = StartHost(api, &mc, &n) if err != nil { if err != constants.ErrMachineMissing { t.Fatalf("Error starting host: %v", err) @@ -188,7 +188,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { n.Name = h.Name // Second call. This should pass without calling Create because the host exists already. 
- h, _, err = StartHost(api, mc, n) + h, _, err = StartHost(api, &mc, &n) if err != nil { t.Fatalf("Error starting host: %v", err) } @@ -207,7 +207,7 @@ func TestStartStoppedHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) // Create an initial host. - h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Fatalf("Error creating host: %v", err) } @@ -220,7 +220,7 @@ func TestStartStoppedHost(t *testing.T) { mc := defaultClusterConfig mc.Name = h.Name n := config.Node{Name: h.Name} - h, _, err = StartHost(api, mc, n) + h, _, err = StartHost(api, &mc, &n) if err != nil { t.Fatal("Error starting host.") } @@ -247,7 +247,7 @@ func TestStartHost(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - h, _, err := StartHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -283,7 +283,7 @@ func TestStartHostConfig(t *testing.T) { DockerOpt: []string{"param=value"}, } - h, _, err := StartHost(api, cfg, config.Node{Name: "minikube"}) + h, _, err := StartHost(api, &cfg, &config.Node{Name: "minikube"}) if err != nil { t.Fatal("Error starting host.") } @@ -313,7 +313,7 @@ func TestStopHostError(t *testing.T) { func TestStopHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -332,7 +332,7 @@ func TestStopHost(t *testing.T) { func TestDeleteHost(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - if _, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}); err != nil { + if _, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}); err != nil { t.Errorf("createHost failed: %v", err) } @@ -347,7 +347,7 @@ func TestDeleteHost(t *testing.T) { func TestDeleteHostErrorDeletingVM(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) - h, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + h, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -364,7 +364,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) { RegisterMockDriver(t) api := tests.NewMockAPI(t) api.RemoveError = true - if _, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}); err != nil { + if _, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}); err != nil { t.Errorf("createHost failed: %v", err) } @@ -378,7 +378,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) { api := tests.NewMockAPI(t) // Create an incomplete host with machine does not exist error(i.e. 
User Interrupt Cancel) api.NotExistError = true - _, err := createHost(api, defaultClusterConfig, config.Node{Name: "minikube"}) + _, err := createHost(api, &defaultClusterConfig, &config.Node{Name: "minikube"}) if err != nil { t.Errorf("createHost failed: %v", err) } @@ -409,7 +409,7 @@ func TestStatus(t *testing.T) { checkState(state.None.String(), m) - if _, err := createHost(api, cc, config.Node{Name: "minikube"}); err != nil { + if _, err := createHost(api, &cc, &config.Node{Name: "minikube"}); err != nil { t.Errorf("createHost failed: %v", err) } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 6a96f8119754..87a0e66062cc 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -48,14 +48,14 @@ const ( ) // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes -func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { +func fixHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*host.Host, error) { start := time.Now() glog.Infof("fixHost starting: %s", n.Name) defer func() { glog.Infof("fixHost completed within %s", time.Since(start)) }() - h, err := api.Load(driver.MachineName(cc, n)) + h, err := api.Load(driver.MachineName(*cc, *n)) if err != nil { return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.") } @@ -72,7 +72,7 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. // Avoid reprovisioning "none" driver because provision.Detect requires SSH if !driver.BareMetal(h.Driver.DriverName()) { - e := engineOptions(cc) + e := engineOptions(*cc) h.HostOptions.EngineOptions.Env = e.Env err = provisionDockerMachine(h) if err != nil { @@ -84,7 +84,7 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, nil } - if err := postStartSetup(h, cc); err != nil { + if err := postStartSetup(h, *cc); err != nil { return h, errors.Wrap(err, "post-start") } @@ -96,8 +96,8 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, ensureSyncedGuestClock(h, driverName) } -func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { - machineName := driver.MachineName(cc, n) +func recreateIfNeeded(api libmachine.API, cc *config.ClusterConfig, n *config.Node, h *host.Host) (*host.Host, error) { + machineName := driver.MachineName(*cc, *n) machineType := driver.MachineType(cc.Driver) recreated := false s, serr := h.Driver.GetState() @@ -112,12 +112,12 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node if !me || err == constants.ErrMachineMissing { out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) - demolish(api, cc, n, h) + demolish(api, *cc, *n, h) glog.Infof("Sleeping 1 second for extra luck!") time.Sleep(1 * time.Second) - h, err = createHost(api, &cc, &n) + h, err = createHost(api, cc, n) if err != nil { return nil, errors.Wrap(err, "recreate") } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index 65939752fd33..35ba8dbaa9f2 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -61,8 +61,8 @@ var ( ) // StartHost starts a host VM. 
-func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, bool, error) { - machineName := driver.MachineName(cfg, n) +func StartHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) (*host.Host, bool, error) { + machineName := driver.MachineName(*cfg, *n) // Prevent machine-driver boot races, as well as our own certificate race releaser, err := acquireMachinesLock(machineName) @@ -81,7 +81,7 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho } if !exists { glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n) - h, err := createHost(api, &cfg, &n) + h, err := createHost(api, cfg, n) return h, exists, err } glog.Infoln("Skipping create...Using existing machine configuration") @@ -153,7 +153,6 @@ func createHost(api libmachine.API, cfg *config.ClusterConfig, n *config.Node) ( } // Save IP to config file for subsequent use ip, err := h.Driver.GetIP() - fmt.Printf("NEW IP = %s\n", ip) if err != nil { return h, err } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index b5a4c92494f3..2f04663ee823 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -21,7 +21,6 @@ import ( "net" "os" "os/exec" - "runtime/debug" "strconv" "strings" "sync" @@ -253,7 +252,6 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k err = cr.Enable(disableOthers, forceSystemd()) if err != nil { - debug.PrintStack() exit.WithError("Failed to enable container runtime", err) } @@ -324,7 +322,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. if err != nil { return runner, preExists, m, host, errors.Wrap(err, "Failed to get machine client") } - host, preExists, err = startHost(m, *cfg, *node) + host, preExists, err = startHost(m, cfg, node) if err != nil { return runner, preExists, m, host, errors.Wrap(err, "Failed to start host") } @@ -348,7 +346,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node) (runner command. } // startHost starts a new minikube host using a VM or None -func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, bool, error) { +func startHost(api libmachine.API, cc *config.ClusterConfig, n *config.Node) (*host.Host, bool, error) { host, exists, err := machine.StartHost(api, cc, n) if err == nil { return host, exists, nil @@ -356,7 +354,7 @@ func startHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*hos // NOTE: People get very cranky if you delete their prexisting VM. Only delete new ones. 
if !exists { - err := machine.DeleteHost(api, driver.MachineName(cc, n)) + err := machine.DeleteHost(api, driver.MachineName(*cc, *n)) if err != nil { glog.Warningf("delete host: %v", err) } From 335379dc59dabe80fc33dea5a8c43e5e56d0eb81 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 11 May 2020 19:25:53 -0700 Subject: [PATCH 11/27] it works --- cmd/minikube/cmd/start.go | 28 +++++++++++++------ cmd/minikube/cmd/start_flags.go | 2 +- pkg/minikube/bootstrapper/bootstrapper.go | 2 +- .../bootstrapper/bsutil/kverify/api_server.go | 2 +- pkg/minikube/bootstrapper/certs.go | 13 +++++---- pkg/minikube/bootstrapper/certs_test.go | 2 +- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 17 +++++------ pkg/minikube/machine/fix.go | 1 + pkg/minikube/node/start.go | 21 ++++++++------ test/integration/multinode_test.go | 2 +- 10 files changed, 54 insertions(+), 36 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index e878da864ef8..b1e8103f5a3d 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -884,16 +884,26 @@ func validateRegistryMirror() { } } -func createNode(cc config.ClusterConfig, kubeNodeName string) (config.ClusterConfig, config.Node, error) { +func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) { // Create the initial node, which will necessarily be a control plane - cp := config.Node{ - Port: cc.KubernetesConfig.NodePort, - KubernetesVersion: getKubernetesVersion(&cc), - Name: kubeNodeName, - ControlPlane: true, - Worker: true, - } - cc.Nodes = []config.Node{cp} + var cp config.Node + var err error + if existing == nil { + cp = config.Node{ + Port: cc.KubernetesConfig.NodePort, + KubernetesVersion: getKubernetesVersion(&cc), + Name: kubeNodeName, + ControlPlane: true, + Worker: true, + } + cc.Nodes = []config.Node{cp} + } else { + cp, err = config.PrimaryControlPlane(existing) + if err != nil { + return cc, config.Node{}, err + } + cc.Nodes = existing.Nodes + } return cc, cp, nil } diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 44a212018d4e..0ce3e59272c1 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -348,7 +348,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k if driver.BareMetal(cc.Driver) { kubeNodeName = "m01" } - return createNode(cc, kubeNodeName) + return createNode(cc, kubeNodeName, existing) } // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 63e53e1b701f..0589616c688f 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -44,7 +44,7 @@ type Bootstrapper interface { GenerateToken(*config.ClusterConfig, *config.Node) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(config.ClusterConfig, LogOptions) map[string]string - SetupCerts(config.KubernetesConfig, config.Node, bool) error + SetupCerts(config.KubernetesConfig, config.Node) error GetAPIServerStatus(string, int) (string, error) } diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go index d305a8b7c066..753323f807c0 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/api_server.go @@ -105,7 +105,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c } if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil { - return fmt.Errorf("apiserver healthz never reported healthy") + return fmt.Errorf("apiserver healthz never reported healthy: %v", err) } vcheck := func() (bool, error) { diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 4bf7abae0042..50597ec8b910 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -47,7 +47,7 @@ import ( ) // SetupCerts gets the generated credentials required to talk to the APIServer. -func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node, keepContext bool) ([]assets.CopyableFile, error) { +func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ([]assets.CopyableFile, error) { localPath := localpath.Profile(k8s.ClusterName) glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP) @@ -56,9 +56,12 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node, return nil, errors.Wrap(err, "shared CA certs") } - xfer, err := generateProfileCerts(k8s, n, ccs) - if err != nil { - return nil, errors.Wrap(err, "profile certs") + var xfer []string + if n.ControlPlane { + xfer, err = generateProfileCerts(k8s, n, ccs) + if err != nil { + return nil, errors.Wrap(err, "profile certs") + } } xfer = append(xfer, ccs.caCert) @@ -99,7 +102,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node, ClientCertificate: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), ClientKey: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), - KeepContext: keepContext, + KeepContext: false, } kubeCfg := api.NewConfig() diff --git a/pkg/minikube/bootstrapper/certs_test.go b/pkg/minikube/bootstrapper/certs_test.go index 72f63883c345..4f93aad180e9 100644 --- a/pkg/minikube/bootstrapper/certs_test.go +++ b/pkg/minikube/bootstrapper/certs_test.go @@ -57,7 +57,7 @@ func TestSetupCerts(t *testing.T) { f := command.NewFakeCommandRunner() f.SetCommandToOutput(expected) - _, err := SetupCerts(f, k8s, config.Node{}, false) + _, err := SetupCerts(f, k8s, config.Node{}) if err != nil { t.Fatalf("Error starting cluster: %v", err) } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 99fc6405a7dd..248becf24438 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -623,10 +623,6 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) error { - if kverify.KubeletStatus(k.c) == state.Running { - return nil - } - if err := k.clearStaleConfigs(cc); err != nil { return errors.Wrap(err, "clearing stale configs") } 
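Two behaviors meet in the restartWorker hunks around here: the kubelet-start join phase must tolerate kubeadm reporting that the node already exists, and the kubelet needs time to come up afterwards. A condensed sketch combining the hunk that follows with the backoff wait introduced in patch 06, assuming minikube's pkg/util/retry and kverify packages as used earlier in this series:

    // Re-running kubelet-start on a node kubeadm already knows about is not an error.
    if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)); err != nil {
        if !strings.Contains(err.Error(), `status "Ready" already exists in the cluster`) {
            return errors.Wrap(err, "running join phase kubelet-start")
        }
    }
    // Poll kubelet state with exponential backoff; old pods from an upgrade
    // may still be shutting down when the join phase returns.
    kubeletStatus := func() error {
        if kverify.KubeletStatus(k.c) != state.Running {
            return errors.New("kubelet not running")
        }
        return nil
    }
    if err := retry.Expo(kubeletStatus, 100*time.Microsecond, 30*time.Second); err != nil {
        return errors.Wrap(err, "kubelet")
    }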
@@ -640,11 +636,12 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) err return errors.Wrap(err, "getting control plane endpoint") } - // Make sure to account for if n.Token doesn't exist for older configs cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s --discovery-token-unsafe-skip-ca-verification", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(host, strconv.Itoa(port)), n.Token) _, err = k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) if err != nil { - return errors.Wrap(err, "running join phase kubelet-start") + if !strings.Contains(err.Error(), "status \"Ready\" already exists in the cluster") { + return errors.Wrap(err, "running join phase kubelet-start") + } } // This can fail during upgrades if the old pods have not shut down yet @@ -665,6 +662,10 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) err // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig, n *config.Node) (string, error) { + if n.Token != "" { + return "", nil + } + // Generate the token so we can store it genTokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token generate", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) r, err := k.c.RunCmd(genTokenCmd) @@ -739,8 +740,8 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { } // SetupCerts sets up certificates within the cluster. -func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node, keepContext bool) error { - _, err := bootstrapper.SetupCerts(k.c, k8s, n, keepContext) +func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) error { + _, err := bootstrapper.SetupCerts(k.c, k8s, n) return err } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 9a358777908c..0777abb2451e 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -147,6 +147,7 @@ func recreateIfNeeded(api libmachine.API, cc *config.ClusterConfig, n *config.No if err := api.Save(h); err != nil { return h, errors.Wrap(err, "save") } + return h, nil } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 2f04663ee823..e0d1377c95fd 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -124,7 +124,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "Failed to get bootstrapper") } - if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node, true); err != nil { + if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node); err != nil { return nil, errors.Wrap(err, "setting up certs") } @@ -163,14 +163,17 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } // Make sure to use the command runner for the control plane to generate the join token - cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) - if err != nil { - return nil, errors.Wrap(err, "getting control plane bootstrapper") - } + var joinCmd string + if !starter.PreExists || starter.Node.Token == "" { + cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) + if err != nil { + return nil, errors.Wrap(err, "getting control plane bootstrapper") + } - joinCmd, err := cpBs.GenerateToken(starter.Cfg, starter.Node) - if err 
!= nil { - return nil, errors.Wrap(err, "generating join token") + joinCmd, err = cpBs.GenerateToken(starter.Cfg, starter.Node) + if err != nil { + return nil, errors.Wrap(err, "generating join token") + } } if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd, starter.PreExists); err != nil { @@ -278,7 +281,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, exit.WithError("Failed to update cluster", err) } - if err := bs.SetupCerts(cfg.KubernetesConfig, n, false); err != nil { + if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil { exit.WithError("Failed to setup certs", err) } diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index cdf0fac0b457..474fb7bf5227 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -143,7 +143,7 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin name := "m03" // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name, "--alsologtostderr")) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name)) if err != nil { t.Errorf("node start returned an error. args %q: %v", rr.Command(), err) } From 562fd5b2dfaead064cddca3a9d8176071e117197 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 11 May 2020 21:59:03 -0700 Subject: [PATCH 12/27] improve node stop output --- cmd/minikube/cmd/node_stop.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go index 3f5c8d743308..ecf6f2ede7cd 100644 --- a/cmd/minikube/cmd/node_stop.go +++ b/cmd/minikube/cmd/node_stop.go @@ -49,6 +49,8 @@ var nodeStopCmd = &cobra.Command{ if err != nil { out.FatalT("Failed to stop node {{.name}}", out.V{"name": name}) } + + out.T(out.Stopped, "Successfully stopped node {{.name}}", out.V{"name": name}) }, } From 009e056f8c9b4aaac42d9eeddba18a07ecc0f658 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Fri, 15 May 2020 15:39:52 -0700 Subject: [PATCH 13/27] revert ip change --- cmd/minikube/cmd/ip.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/minikube/cmd/ip.go b/cmd/minikube/cmd/ip.go index 40a750cd1c32..6a2ca32055a1 100644 --- a/cmd/minikube/cmd/ip.go +++ b/cmd/minikube/cmd/ip.go @@ -29,8 +29,6 @@ var ipCmd = &cobra.Command{ Long: `Retrieves the IP address of the running cluster, and writes it to STDOUT.`, Run: func(cmd *cobra.Command, args []string) { co := mustload.Running(ClusterFlagValue()) - for _, n := range co.Config.Nodes { - out.Ln(n.IP) - } + out.Ln(co.CP.IP.String()) }, } From 6abb3996685c09337212e1394dfe654c9df5c4cd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 11:28:35 -0700 Subject: [PATCH 14/27] make sure to call the correct control plane address --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 13 ++----------- pkg/minikube/machine/fix.go | 2 +- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 248becf24438..719a1b5b500e 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -627,17 +627,8 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) err return errors.Wrap(err, "clearing stale configs") } - cp, err := config.PrimaryControlPlane(&cc) - if err != nil { - return errors.Wrap(err, "getting primary control plane") - } - 
host, _, port, err := driver.ControlPlaneEndpoint(&cc, &cp, cc.Driver) - if err != nil { - return errors.Wrap(err, "getting control plane endpoint") - } - - cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s --discovery-token-unsafe-skip-ca-verification", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(host, strconv.Itoa(port)), n.Token) - _, err = k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) + cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s --discovery-token-unsafe-skip-ca-verification", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(constants.APIServerPort)), n.Token) + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) if err != nil { if !strings.Contains(err.Error(), "status \"Ready\" already exists in the cluster") { return errors.Wrap(err, "running join phase kubelet-start") diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index e9e723bba76e..c01e48a5f6aa 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -111,7 +111,7 @@ func recreateIfNeeded(api libmachine.API, cc *config.ClusterConfig, n *config.No } if !me || err == constants.ErrMachineMissing { - out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": machineName, "machine_type": machineType}) demolish(api, *cc, *n, h) glog.Infof("Sleeping 1 second for extra luck!") From 49cc6ae2599b8a40d12529a6ef66b5967058b904 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 13:13:57 -0700 Subject: [PATCH 15/27] revert old unrelated changes: --- cmd/minikube/cmd/node_add.go | 16 +--------------- cmd/minikube/cmd/start.go | 2 +- pkg/minikube/bootstrapper/certs.go | 9 +++------ pkg/minikube/node/node.go | 8 ++++---- 4 files changed, 9 insertions(+), 26 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 8ff9a2d31889..2558dd688476 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -17,11 +17,8 @@ limitations under the License. 
package cmd import ( - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" - cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" - "k8s.io/minikube/pkg/minikube/cluster" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" @@ -66,7 +63,7 @@ var nodeAddCmd = &cobra.Command{ } } - starter, err := node.Add(cc, n) + err := node.Add(cc, n) if err != nil { _, err := maybeDeleteAndRetry(*cc, n, nil, err) if err != nil { @@ -79,17 +76,6 @@ var nodeAddCmd = &cobra.Command{ exit.WithError("failed to save config", err) } - // Restart the control plane to pick up the new CNI - bs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, cc, cmdcfg.Bootstrapper) - if err != nil { - glog.Warningf("failed to get control plane bootstrapper: %v", err) - } else { - err := bs.StartCluster(*cc) - if err != nil { - glog.Warningf("failed to restart cluster: %v", err) - } - } - out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name}) }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index b3a43ee2accc..d229da56c5e6 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -312,7 +312,7 @@ func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kub KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, } out.Ln("") // extra newline for clarity on the command line - _, err := node.Add(starter.Cfg, n) + err := node.Add(starter.Cfg, n) if err != nil { return nil, errors.Wrap(err, "adding node") } diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index db339ca01dc2..fbc3481bf71d 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -56,12 +56,9 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) return nil, errors.Wrap(err, "shared CA certs") } - var xfer []string - if n.ControlPlane { - xfer, err = generateProfileCerts(k8s, n, ccs) - if err != nil { - return nil, errors.Wrap(err, "profile certs") - } + xfer, err := generateProfileCerts(k8s, n, ccs) + if err != nil { + return nil, errors.Wrap(err, "profile certs") } xfer = append(xfer, ccs.caCert) diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index dd2a8e85f5a9..dcc4f4d7d536 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -34,14 +34,14 @@ const ( ) // Add adds a new node config to an existing cluster. 
-func Add(cc *config.ClusterConfig, n config.Node) (Starter, error) { +func Add(cc *config.ClusterConfig, n config.Node) error { if err := config.SaveNode(cc, &n); err != nil { - return Starter{}, errors.Wrap(err, "save node") + return errors.Wrap(err, "save node") } r, p, m, h, err := Provision(cc, &n, false) if err != nil { - return Starter{}, err + return err } s := Starter{ Runner: r, @@ -54,7 +54,7 @@ func Add(cc *config.ClusterConfig, n config.Node) (Starter, error) { } _, err = Start(s, false) - return s, err + return err } // Delete stops and deletes the given node from the given cluster From 43d37ec9f3d9519df3e8cce6e122712b951602ea Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 14:19:50 -0700 Subject: [PATCH 16/27] debugging --- pkg/drivers/kic/oci/network.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/drivers/kic/oci/network.go b/pkg/drivers/kic/oci/network.go index 3cb1cf859c70..66de5656a500 100644 --- a/pkg/drivers/kic/oci/network.go +++ b/pkg/drivers/kic/oci/network.go @@ -104,6 +104,8 @@ func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) { return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID) } } else { + rr, err = runCmd(exec.Command(ociBin, "inspect", ociID)) + fmt.Println(rr.Stdout.String()) rr, err = runCmd(exec.Command(ociBin, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)) if err != nil { return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID) From 3caad25d0e7708def5357d51020c8ba23db47050 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 14:41:15 -0700 Subject: [PATCH 17/27] lint --- pkg/drivers/kic/oci/network.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/drivers/kic/oci/network.go b/pkg/drivers/kic/oci/network.go index 66de5656a500..26efa7f1d6db 100644 --- a/pkg/drivers/kic/oci/network.go +++ b/pkg/drivers/kic/oci/network.go @@ -106,6 +106,9 @@ func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) { } else { rr, err = runCmd(exec.Command(ociBin, "inspect", ociID)) fmt.Println(rr.Stdout.String()) + if err != nil { + fmt.Println(err.Error()) + } rr, err = runCmd(exec.Command(ociBin, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)) if err != nil { return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID) From 13f0b87f59a0f1cfd04a897ebffc806114baffb5 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 15:49:17 -0700 Subject: [PATCH 18/27] better debugging --- pkg/drivers/kic/oci/network.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/drivers/kic/oci/network.go b/pkg/drivers/kic/oci/network.go index 26efa7f1d6db..5c05ad559499 100644 --- a/pkg/drivers/kic/oci/network.go +++ b/pkg/drivers/kic/oci/network.go @@ -105,7 +105,7 @@ func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) { } } else { rr, err = runCmd(exec.Command(ociBin, "inspect", ociID)) - fmt.Println(rr.Stdout.String()) + fmt.Println(rr.Stderr.String()) if err != nil { fmt.Println(err.Error()) } From 95584eac434e28bc76357e4a80324fc65b0f5d1d Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 16:08:44 -0700 Subject: [PATCH 19/27] skip start/stop on github actions --- pkg/drivers/kic/oci/network.go | 5 ----- test/integration/multinode_test.go | 10 ++++++++++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/drivers/kic/oci/network.go 
b/pkg/drivers/kic/oci/network.go index 5c05ad559499..3cb1cf859c70 100644 --- a/pkg/drivers/kic/oci/network.go +++ b/pkg/drivers/kic/oci/network.go @@ -104,11 +104,6 @@ func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) { return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID) } } else { - rr, err = runCmd(exec.Command(ociBin, "inspect", ociID)) - fmt.Println(rr.Stderr.String()) - if err != nil { - fmt.Println(err.Error()) - } rr, err = runCmd(exec.Command(ociBin, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)) if err != nil { return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 62d16abb4ac3..458d79f27ac8 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -140,6 +140,16 @@ func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) } func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) { + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}") + if err != nil { + t.Fatalf("docker is broken: %v", err) + } + if strings.Contains(rr.Stdout.String(), "azure") { + t.Skip("kic containers are not supported on docker's azure") + } + } + // Grab the stopped node name := "m03" From 023b2dc91165ac1996dd6cc1bc1809b54453b5f1 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 16:56:28 -0700 Subject: [PATCH 20/27] fix test --- test/integration/multinode_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 458d79f27ac8..4777d16bf218 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -141,7 +141,7 @@ func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) { if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}") + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) if err != nil { t.Fatalf("docker is broken: %v", err) } @@ -149,7 +149,7 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin t.Skip("kic containers are not supported on docker's azure") } } - + // Grab the stopped node name := "m03" From c4122377c4166e3f8e3b9fd2f4f4aa8c33263f77 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 20 May 2020 18:53:49 -0700 Subject: [PATCH 21/27] add postmortem logs --- test/integration/multinode_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 4777d16bf218..b2c55ebf68d7 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -50,6 +50,7 @@ func TestMultiNode(t *testing.T) { for _, tc := range tests { tc := tc t.Run(tc.name, func(t *testing.T) { + defer PostMortemLogs(t, profile) tc.validator(ctx, t, profile) }) } From b5167834d006da71e6380824f2c87590ea9cc238 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 27 May 2020 09:58:03 -0700 Subject: [PATCH 22/27] it works! 
and simpler --- cmd/minikube/cmd/start.go | 34 ++++++---- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 71 ++++++-------------- pkg/minikube/config/types.go | 1 - pkg/minikube/cruntime/cruntime.go | 6 +- pkg/minikube/machine/cache_images.go | 10 +-- pkg/minikube/node/start.go | 17 ++--- test/integration/multinode_test.go | 57 +++++++++++++++- 7 files changed, 116 insertions(+), 80 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index e15abb74dbec..6120621e909c 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -301,19 +301,29 @@ func startWithDriver(starter node.Starter, existing *config.ClusterConfig) (*kub if existing == nil { out.Ln("") warnAboutMultiNode() - } - for i := 1; i < numNodes; i++ { - nodeName := node.Name(i + 1) - n := config.Node{ - Name: nodeName, - Worker: true, - ControlPlane: false, - KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, + + for i := 1; i < numNodes; i++ { + nodeName := node.Name(i + 1) + n := config.Node{ + Name: nodeName, + Worker: true, + ControlPlane: false, + KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, + } + out.Ln("") // extra newline for clarity on the command line + err := node.Add(starter.Cfg, n) + if err != nil { + return nil, errors.Wrap(err, "adding node") + } } - out.Ln("") // extra newline for clarity on the command line - err := node.Add(starter.Cfg, n) - if err != nil { - return nil, errors.Wrap(err, "adding node") + } else { + for _, n := range existing.Nodes { + if !n.ControlPlane { + err := node.Add(starter.Cfg, n) + if err != nil { + return nil, errors.Wrap(err, "adding node") + } + } } } } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 719a1b5b500e..f7a373381bd4 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -604,48 +604,29 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC glog.Infof("JoinCluster complete in %s", time.Since(start)) }() - if preExists { - return k.restartWorker(cc, n) - } // Join the master by specifying its token joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, driver.MachineName(cc, n)) - out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) - if err != nil { - return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out) - } - - if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { - return errors.Wrap(err, "starting kubelet") - } - return nil -} - -func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) error { - - if err := k.clearStaleConfigs(cc); err != nil { - return errors.Wrap(err, "clearing stale configs") - } - - cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s --discovery-token-unsafe-skip-ca-verification", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(constants.APIServerPort)), n.Token) - _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) - if err != nil { - if !strings.Contains(err.Error(), "status \"Ready\" already exists in the cluster") { - return errors.Wrap(err, "running join phase kubelet-start") + join := func() error { + // reset first to clear any possibly existing state + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s reset -f", 
bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)))) + if err != nil { + glog.Infof("kubeadm reset failed, continuing anyway: %v", err) } - } - // This can fail during upgrades if the old pods have not shut down yet - kubeletStatus := func() error { - st := kverify.KubeletStatus(k.c) - if st != state.Running { - return errors.New("kubelet not running") + out, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", joinCmd)) + if err != nil { + return errors.Wrapf(err, "cmd failed: %s\n%+v\n", joinCmd, out.Output()) } return nil } - if err = retry.Expo(kubeletStatus, 100*time.Microsecond, 30*time.Second); err != nil { - glog.Warningf("kubelet is not ready: %v", err) - return errors.Wrap(err, "kubelet") + + if err := retry.Expo(join, 10*time.Second, 1*time.Minute); err != nil { + return errors.Wrap(err, "joining cp") + } + + if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet")); err != nil { + return errors.Wrap(err, "starting kubelet") } return nil @@ -653,22 +634,9 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) err // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig, n *config.Node) (string, error) { - if n.Token != "" { - return "", nil - } - - // Generate the token so we can store it - genTokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token generate", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) - r, err := k.c.RunCmd(genTokenCmd) - if err != nil { - return "", errors.Wrap(err, "generating bootstrap token") - } - token := strings.TrimSpace(r.Stdout.String()) - n.Token = token - // Take that generated token and use it to get a kubeadm join command - tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create %s --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), token)) - r, err = k.c.RunCmd(tokenCmd) + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) + r, err := k.c.RunCmd(tokenCmd) if err != nil { return "", errors.Wrap(err, "generating join command") } @@ -676,6 +644,9 @@ func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig, n *config.Node) ( joinCmd := r.Stdout.String() joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) + if cc.KubernetesConfig.CRISocket != "" { + joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, cc.KubernetesConfig.CRISocket) + } // Save the new token for later use err = config.SaveNode(cc, n) @@ -806,7 +777,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru // Copy the default CNI config (k8s.conf), so that kubelet can successfully // start a Pod in the case a user hasn't manually installed any CNI plugin // and minikube was started with "--extra-config=kubelet.network-plugin=cni". 
- if cfg.KubernetesConfig.EnableDefaultCNI { + if cfg.KubernetesConfig.EnableDefaultCNI && !config.MultiNode(cfg) { files = append(files, assets.NewMemoryAssetTarget([]byte(defaultCNIConfig), bsutil.DefaultCNIConfigPath, "0644")) } diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index f1ca2a71f010..f9ebbf527394 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -112,7 +112,6 @@ type Node struct { KubernetesVersion string ControlPlane bool Worker bool - Token string } // VersionedExtraOption holds information on flags to apply to a specific range diff --git a/pkg/minikube/cruntime/cruntime.go b/pkg/minikube/cruntime/cruntime.go index 3ba208a47a81..0cc5046f0d51 100644 --- a/pkg/minikube/cruntime/cruntime.go +++ b/pkg/minikube/cruntime/cruntime.go @@ -136,7 +136,11 @@ func New(c Config) (Manager, error) { switch c.Type { case "", "docker": - return &Docker{Socket: c.Socket, Runner: c.Runner, Init: sm}, nil + return &Docker{ + Socket: c.Socket, + Runner: c.Runner, + Init: sm, + }, nil case "crio", "cri-o": return &CRIO{ Socket: c.Socket, diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index e677e5e58db1..87e21ef2f1bc 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -196,8 +196,8 @@ func CacheAndLoadImages(images []string) error { status, err := Status(api, m) if err != nil { - glog.Warningf("error getting status for %s: %v", pName, err) - failed = append(failed, pName) + glog.Warningf("error getting status for %s: %v", m, err) + failed = append(failed, m) continue } @@ -205,7 +205,7 @@ func CacheAndLoadImages(images []string) error { h, err := api.Load(m) if err != nil { glog.Warningf("Failed to load machine %q: %v", m, err) - failed = append(failed, pName) + failed = append(failed, m) continue } cr, err := CommandRunner(h) @@ -214,10 +214,10 @@ func CacheAndLoadImages(images []string) error { } err = LoadImages(c, cr, images, constants.ImageCacheDir) if err != nil { - failed = append(failed, pName) + failed = append(failed, m) glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. 
%v", pName, err) } - succeeded = append(succeeded, pName) + succeeded = append(succeeded, m) } } } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 517881f10bda..19cca045b5b2 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -163,17 +163,14 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } // Make sure to use the command runner for the control plane to generate the join token - var joinCmd string - if !starter.PreExists || starter.Node.Token == "" { - cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) - if err != nil { - return nil, errors.Wrap(err, "getting control plane bootstrapper") - } + cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) + if err != nil { + return nil, errors.Wrap(err, "getting control plane bootstrapper") + } - joinCmd, err = cpBs.GenerateToken(starter.Cfg, starter.Node) - if err != nil { - return nil, errors.Wrap(err, "generating join token") - } + joinCmd, err := cpBs.GenerateToken(starter.Cfg, starter.Node) + if err != nil { + return nil, errors.Wrap(err, "generating join token") } if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd, starter.PreExists); err != nil { diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index dc62f83cd5ca..62f1b33ccceb 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -45,6 +45,8 @@ func TestMultiNode(t *testing.T) { {"AddNode", validateAddNodeToMultiNode}, {"StopNode", validateStopRunningNode}, {"StartAfterStop", validateStartNodeAfterStop}, + {"StopMultiNode", validateStopMultiNodeCluster}, + {"RestartMultiNode", validateRestartMultiNodeCluster}, {"DeleteNode", validateDeleteNodeFromMultiNode}, } for _, tc := range tests { @@ -149,8 +151,9 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin } // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", ThirdNodeName)) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", ThirdNodeName, "--alsologtostderr")) if err != nil { + t.Logf(rr.Stderr.String()) t.Errorf("node start returned an error. args %q: %v", rr.Command(), err) } @@ -175,6 +178,58 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin } } +func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { + // Run minikube node stop on that node + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop")) + if err != nil { + t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) + } + + // Run status to see the stopped hosts + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) + // Exit code 7 means one host is stopped, which we are expecting + if err != nil && rr.ExitCode != 7 { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + + // Make sure minikube status shows 3 stopped nodes + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) + if err != nil && rr.ExitCode != 7 { + t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) + } + + if strings.Count(rr.Stdout.String(), "host: Stopped") != 3 { + t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 3 { + t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} + +func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { + // Restart a full cluster with minikube start + startArgs := append([]string{"start", "-p", profile}, StartArgs()...) + rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) + if err != nil { + t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) + } + + // Make sure minikube status shows 3 running nodes + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) + if err != nil { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + + if strings.Count(rr.Stdout.String(), "host: Running") != 3 { + t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } + + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { + t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) + } +} + func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) { // Start the node back up From 101f2da01df789d789285939e8911e400cd1987a Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 27 May 2020 10:10:46 -0700 Subject: [PATCH 23/27] revert changes to node_add --- cmd/minikube/cmd/node_add.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 2558dd688476..8e7280ae4322 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -63,8 +63,7 @@ var nodeAddCmd = &cobra.Command{ } } - err := node.Add(cc, n) - if err != nil { + if err := node.Add(cc, n); err != nil { _, err := maybeDeleteAndRetry(*cc, n, nil, err) if err != nil { exit.WithError("failed to add node", err) @@ -72,6 +71,7 @@ var nodeAddCmd = &cobra.Command{ } // Add CNI config if it's not already there + // We need to run kubeadm.init here as well if err := config.MultiNodeCNIConfig(cc); err != nil { exit.WithError("failed to save config", err) } From a7e483a8a42c0cd46b325768904db5399bbb264a Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 27 May 2020 10:17:01 -0700 Subject: [PATCH 24/27] let's simplify kubeadm again --- pkg/minikube/bootstrapper/bootstrapper.go | 4 ++-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 9 ++------- pkg/minikube/node/start.go | 4 ++-- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 0589616c688f..4d18749ccd23 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -39,9 +39,9 @@ type Bootstrapper interface { UpdateCluster(config.ClusterConfig) error DeleteCluster(config.KubernetesConfig) error WaitForNode(config.ClusterConfig, config.Node, time.Duration) error - JoinCluster(config.ClusterConfig, config.Node, string, bool) error + JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - GenerateToken(*config.ClusterConfig, *config.Node) (string, error) + GenerateToken(config.ClusterConfig) (string, 
error) // LogCommands returns a map of log type to a command which will display that log. LogCommands(config.ClusterConfig, LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index f7a373381bd4..2e0cb55e3e19 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -597,7 +597,7 @@ func (k *Bootstrapper) restartControlPlane(cfg config.ClusterConfig) error { } // JoinCluster adds a node to an existing cluster -func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string, preExists bool) error { +func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinCmd string) error { start := time.Now() glog.Infof("JoinCluster: %+v", cc) defer func() { @@ -633,7 +633,7 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC } // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token -func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig, n *config.Node) (string, error) { +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { // Take that generated token and use it to get a kubeadm join command tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) r, err := k.c.RunCmd(tokenCmd) @@ -648,11 +648,6 @@ func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig, n *config.Node) ( joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, cc.KubernetesConfig.CRISocket) } - // Save the new token for later use - err = config.SaveNode(cc, n) - if err != nil { - return joinCmd, errors.Wrap(err, "saving node") - } return joinCmd, nil } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 19cca045b5b2..d665c8f49491 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -168,12 +168,12 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "getting control plane bootstrapper") } - joinCmd, err := cpBs.GenerateToken(starter.Cfg, starter.Node) + joinCmd, err := cpBs.GenerateToken(*starter.Cfg) if err != nil { return nil, errors.Wrap(err, "generating join token") } - if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd, starter.PreExists); err != nil { + if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { return nil, errors.Wrap(err, "joining cluster") } } From 606a307d146d8fcb8a2367dfb77e81b87370b5b6 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 27 May 2020 10:30:14 -0700 Subject: [PATCH 25/27] skip restart test on github actions --- test/integration/multinode_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 62f1b33ccceb..a17168bfa30e 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -208,6 +208,15 @@ func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile str } func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) + if err != nil { + t.Fatalf("docker is broken: %v", err) + } + if strings.Contains(rr.Stdout.String(), 
"azure") { + t.Skip("kic containers are not supported on docker's azure") + } + } // Restart a full cluster with minikube start startArgs := append([]string{"start", "-p", profile}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) From 1945b5f7bb3d8d8e5ab5ba5bc383c9c901d640f3 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 27 May 2020 10:44:40 -0700 Subject: [PATCH 26/27] swap test order to make it work in github actions --- test/integration/multinode_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index a17168bfa30e..3210ed4bb3b3 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -45,9 +45,9 @@ func TestMultiNode(t *testing.T) { {"AddNode", validateAddNodeToMultiNode}, {"StopNode", validateStopRunningNode}, {"StartAfterStop", validateStartNodeAfterStop}, + {"DeleteNode", validateDeleteNodeFromMultiNode}, {"StopMultiNode", validateStopMultiNodeCluster}, {"RestartMultiNode", validateRestartMultiNodeCluster}, - {"DeleteNode", validateDeleteNodeFromMultiNode}, } for _, tc := range tests { tc := tc @@ -198,11 +198,11 @@ func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile str t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) } - if strings.Count(rr.Stdout.String(), "host: Stopped") != 3 { + if strings.Count(rr.Stdout.String(), "host: Stopped") != 2 { t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String()) } - if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 3 { + if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 2 { t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) } } @@ -230,11 +230,11 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) } - if strings.Count(rr.Stdout.String(), "host: Running") != 3 { + if strings.Count(rr.Stdout.String(), "host: Running") != 2 { t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) } - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { + if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) } } From cebba2983281bb4574be1ba63880ed7afdb9e4c4 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 27 May 2020 10:47:31 -0700 Subject: [PATCH 27/27] comments --- test/integration/multinode_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 3210ed4bb3b3..e43c5a2eb08e 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -192,7 +192,7 @@ func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile str t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) } - // Make sure minikube status shows 3 stopped nodes + // Make sure minikube status shows 2 stopped nodes rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) if err != nil && rr.ExitCode != 7 { t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) @@ -224,7 +224,7 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) } - // Make sure minikube status shows 3 running nodes + // Make sure minikube status shows 2 running nodes rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) if err != nil { t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)