From a00d632930deb3d1da068654dd8cbe1bc71c9d78 Mon Sep 17 00:00:00 2001
From: Daehyeok Mun
Date: Fri, 8 Jan 2021 19:21:16 -0800
Subject: [PATCH] Check profile and node names to prevent duplication

Check the profile and node names before adding them, to prevent conflicts
with machine names in multi-node clusters.
---
 cmd/minikube/cmd/start.go            | 21 ++++++++++++++++
 pkg/minikube/config/profile.go       | 22 +++++++++++++++--
 pkg/minikube/machine/cluster_test.go |  4 +--
 pkg/minikube/node/node.go            | 18 ++++++++++++++
 test/integration/multinode_test.go   | 37 ++++++++++++++++++++++++++++
 5 files changed, 97 insertions(+), 5 deletions(-)

diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go
index 24cda29dfe11..78b201677c06 100644
--- a/cmd/minikube/cmd/start.go
+++ b/cmd/minikube/cmd/start.go
@@ -176,6 +176,8 @@ func runStart(cmd *cobra.Command, args []string) {
 
 	if existing != nil {
 		upgradeExistingConfig(existing)
+	} else {
+		validateProfileName()
 	}
 
 	validateSpecifiedDriver(existing)
@@ -638,6 +640,25 @@ func hostDriver(existing *config.ClusterConfig) string {
 	return h.Driver.DriverName()
 }
 
+// validateProfileName makes sure that the new profile name does not duplicate any machine name in an existing multi-node cluster.
+func validateProfileName() {
+	profiles, err := config.ListValidProfiles()
+	if err != nil {
+		exit.Message(reason.InternalListConfig, "Unable to list profiles: {{.error}}", out.V{"error": err})
+	}
+	for _, p := range profiles {
+		for _, n := range p.Config.Nodes {
+			machineName := config.MachineName(*p.Config, n)
+			if ClusterFlagValue() == machineName {
+				out.WarningT("Profile name '{{.name}}' is duplicated with machine name '{{.machine}}' in profile '{{.profile}}'", out.V{"name": ClusterFlagValue(),
+					"machine": machineName,
+					"profile": p.Name})
+				exit.Message(reason.Usage, "Profile name should be unique")
+			}
+		}
+	}
+}
+
 // validateSpecifiedDriver makes sure that if a user has passed in a driver
 // it matches the existing cluster if there is one
 func validateSpecifiedDriver(existing *config.ClusterConfig) {
diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go
index a632f0485958..d1b58947c278 100644
--- a/pkg/minikube/config/profile.go
+++ b/pkg/minikube/config/profile.go
@@ -223,7 +223,25 @@ func ListProfiles(miniHome ...string) (validPs []*Profile, inValidPs []*Profile,
 	return validPs, inValidPs, nil
 }
 
-// removeDupes removes duplicates
+// ListValidProfiles returns the profiles found in the minikube home dir.
+// Unlike `ListProfiles`, this function doesn't try to get profiles from containers.
+func ListValidProfiles(miniHome ...string) (ps []*Profile, err error) {
+	// try to get the profile list based on leftover evidence such as profile directories
+	pDirs, err := profileDirs(miniHome...)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, n := range pDirs {
+		p, err := LoadProfile(n, miniHome...)
+		if err == nil && p.IsValid() {
+			ps = append(ps, p)
+		}
+	}
+	return ps, nil
+}
+
+// removeDupes removes duplicates
 func removeDupes(profiles []string) []string {
 	// Use map to record duplicates as we find them.
 	seen := map[string]bool{}
@@ -291,7 +309,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string {
 // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names
 func MachineName(cc ClusterConfig, n Node) string {
 	// For single node cluster, default to back to old naming
-	if len(cc.Nodes) == 1 || n.ControlPlane {
+	if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
 		return cc.Name
 	}
 	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go
index ea32cc36a743..b33f8e65ff3f 100644
--- a/pkg/minikube/machine/cluster_test.go
+++ b/pkg/minikube/machine/cluster_test.go
@@ -146,10 +146,8 @@ func TestStartHostExists(t *testing.T) {
 
 	mc := defaultClusterConfig
 	mc.Name = ih.Name
-	n := config.Node{Name: ih.Name}
-
 	// This should pass without calling Create because the host exists already.
-	h, _, err := StartHost(api, &mc, &n)
+	h, _, err := StartHost(api, &mc, &(mc.Nodes[0]))
 	if err != nil {
 		t.Fatalf("Error starting host: %v", err)
 	}
diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go
index 672e52276d54..2f41f49a5f04 100644
--- a/pkg/minikube/node/node.go
+++ b/pkg/minikube/node/node.go
@@ -38,6 +38,24 @@ const (
 
 // Add adds a new node config to an existing cluster.
 func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
+	profiles, err := config.ListValidProfiles()
+	if err != nil {
+		return err
+	}
+
+	machineName := config.MachineName(*cc, n)
+	for _, p := range profiles {
+		if p.Config.Name == cc.Name {
+			continue
+		}
+
+		for _, existNode := range p.Config.Nodes {
+			if machineName == config.MachineName(*p.Config, existNode) {
+				return errors.Errorf("Node %s already exists in %s profile", machineName, p.Name)
+			}
+		}
+	}
+
 	if err := config.SaveNode(cc, &n); err != nil {
 		return errors.Wrap(err, "save node")
 	}
diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go
index bad0bf781789..268bbf25a94c 100644
--- a/test/integration/multinode_test.go
+++ b/test/integration/multinode_test.go
@@ -48,6 +48,7 @@ func TestMultiNode(t *testing.T) {
 		{"DeleteNode", validateDeleteNodeFromMultiNode},
 		{"StopMultiNode", validateStopMultiNodeCluster},
 		{"RestartMultiNode", validateRestartMultiNodeCluster},
+		{"ValidateNameConflict", validateNameConflict},
 	}
 	for _, tc := range tests {
 		tc := tc
@@ -308,3 +309,39 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
 		t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output())
 	}
 }
+
+func validateNameConflict(ctx context.Context, t *testing.T, profile string) {
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile))
+	if err != nil {
+		t.Errorf("failed to run node list. args %q : %v", rr.Command(), err)
+	}
+	curNodeNum := strings.Count(rr.Stdout.String(), profile)
+
+	// Start a new profile whose name matches an existing machine name. This is expected to fail.
+	profileName := fmt.Sprintf("%s-m0%d", profile, curNodeNum)
+	startArgs := append([]string{"start", "-p", profileName}, StartArgs()...)
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err == nil {
+		t.Errorf("expected start profile command to fail. args %q", rr.Command())
+	}
+
+	// Start a temporary profile whose name will conflict with the next node's machine name.
+	profileName = fmt.Sprintf("%s-m0%d", profile, curNodeNum+1)
+	startArgs = append([]string{"start", "-p", profileName}, StartArgs()...)
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err != nil {
+		t.Errorf("failed to start profile. args %q : %v", rr.Command(), err)
+	}
+
+	// Add a node to the current cluster. This is expected to fail because its machine name now collides with the temporary profile.
+	addArgs := []string{"node", "add", "-p", profile}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), addArgs...))
+	if err == nil {
+		t.Errorf("expected add node command to fail. args %q : %v", rr.Command(), err)
+	}
+
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profileName))
+	if err != nil {
+		t.Logf("failed to clean up temporary profile. args %q : %v", rr.Command(), err)
+	}
+}
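
For reference, a minimal, self-contained sketch (not part of the patch) of the naming scheme the new checks guard against: secondary nodes of a profile get machine names like "<profile>-m02", so a profile named after such a machine, or a new node whose machine name matches an existing profile, collides. The `node`/`clusterConfig` types, the `machineName` helper, and the "multinode-demo" profile name below are illustrative stand-ins for config.Node, config.ClusterConfig, and config.MachineName, not minikube's actual definitions.

package main

import "fmt"

// node and clusterConfig are simplified stand-ins carrying only the
// fields the naming logic needs.
type node struct {
	Name         string
	ControlPlane bool
}

type clusterConfig struct {
	Name  string
	Nodes []node
}

// machineName mirrors the patched config.MachineName: a single-node
// cluster (or a control-plane node) keeps the profile name, every other
// node gets "<profile>-<node>".
func machineName(cc clusterConfig, n node) string {
	if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
		return cc.Name
	}
	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

func main() {
	// A hypothetical two-node cluster under the profile "multinode-demo".
	existing := clusterConfig{
		Name: "multinode-demo",
		Nodes: []node{
			{Name: "m01", ControlPlane: true},
			{Name: "m02"},
		},
	}

	for _, n := range existing.Nodes {
		fmt.Println(machineName(existing, n)) // multinode-demo, multinode-demo-m02
	}

	// A new profile literally named "multinode-demo-m02" would collide with
	// the second machine above; validateProfileName rejects this case, and
	// node.Add rejects the reverse case where a new node's machine name
	// matches a machine in another profile.
	newProfile := "multinode-demo-m02"
	fmt.Println("conflict:", newProfile == machineName(existing, existing.Nodes[1])) // conflict: true
}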