introduce code checker and code cleanup #171

Merged: 16 commits, Aug 25, 2018
8 changes: 8 additions & 0 deletions .travis.yml
@@ -9,7 +9,15 @@ before_install:
- go get -v github.com/golang/dep/cmd/dep
- dep ensure
- go get -v github.com/golang/lint/golint
- go get -u honnef.co/go/tools/...
- go get -u mvdan.cc/unparam
- go get -u github.com/client9/misspell/cmd/misspell

script:
- golint cmd/ pkg/
- go test -v ./...
- unused $(go list ./...)
- gosimple $(go list ./...)
- misspell -error $(git ls-files | grep -v vendor/)
- unparam $(go list ./...)
- staticcheck $(go list ./...)
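The new script entries give CI several complementary checks: golint enforces style conventions; unused, gosimple and staticcheck (all installed from honnef.co/go/tools above) flag dead code, needlessly convoluted constructs and likely bugs; unparam reports function parameters that are never used; and misspell catches typos in comments and strings. A toy snippet, written here purely for illustration (it is not from this repository), showing the kind of code these tools reject:

```go
package example

// helper is never referenced anywhere, so `unused` reports it as dead code.
func helper() {}

// isZero recieves an int and reports whether it is zero.
// misspell flags "recieves" above; unparam flags `verbose`, which nothing uses.
func isZero(x int, verbose bool) bool {
	// gosimple (check S1008) suggests collapsing this to `return x == 0`.
	if x == 0 {
		return true
	}
	return false
}
```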
9 changes: 0 additions & 9 deletions Gopkg.lock


9 changes: 4 additions & 5 deletions cmd/cluster_add_external_worker.go
@@ -17,12 +17,13 @@ package cmd
import (
"errors"
"fmt"
"log"
"strings"

"github.com/spf13/cobra"
"github.com/xetys/hetzner-kube/pkg"
"github.com/xetys/hetzner-kube/pkg/clustermanager"
"github.com/xetys/hetzner-kube/pkg/hetzner"
"log"
"strings"
)

// clusterAddWorkerCmd represents the clusterAddWorker command
@@ -138,9 +139,7 @@ An external server must meet the following requirements:
}
externalNode.PrivateIPAddress = fmt.Sprintf("%s.%d", cidrPrefix, nextNode)
coordinator := pkg.NewProgressCoordinator()
hetznerProvider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, AppConf.CurrentContext.Token)
hetznerProvider.InitCluster(cluster.Name, cluster.NodeCIDR)
hetznerProvider.SetNodes(cluster.Nodes)
hetznerProvider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, *cluster, AppConf.CurrentContext.Token)
clusterManager := clustermanager.NewClusterManagerFromCluster(*cluster, hetznerProvider, sshClient, coordinator)

nodes := []clustermanager.Node{externalNode}
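The recurring change across the cmd/ files below: hetzner.ProviderAndManager, which returned a provider/manager pair that callers often half-ignored, is gone, and each call site now constructs only what it needs. A minimal sketch of the presumed new constructor, with the signature taken from the call sites and everything else (the Provider struct and its field names) an assumption:

```go
package hetzner

import (
	"context"

	"github.com/hetznercloud/hcloud-go/hcloud"
	"github.com/xetys/hetzner-kube/pkg/clustermanager"
)

// Provider's real fields are not visible in this diff; these are assumptions.
type Provider struct {
	ctx     context.Context
	client  *hcloud.Client
	cluster clustermanager.Cluster
	token   string
}

// NewHetznerProvider now receives the whole cluster up front, so the separate
// InitCluster/SetNodes calls removed above become unnecessary.
func NewHetznerProvider(ctx context.Context, client *hcloud.Client, cluster clustermanager.Cluster, token string) *Provider {
	return &Provider{ctx: ctx, client: client, cluster: cluster, token: token}
}
```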
3 changes: 2 additions & 1 deletion cmd/cluster_add_worker.go
@@ -93,7 +93,8 @@ You can specify the worker server type as in cluster create.`,
}

coordinator := pkg.NewProgressCoordinator()
hetznerProvider, clusterManager := hetzner.ProviderAndManager(AppConf.Context, *cluster, AppConf.Client, AppConf.SSHClient, coordinator, AppConf.CurrentContext.Token)
hetznerProvider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, *cluster, AppConf.CurrentContext.Token)
clusterManager := clustermanager.NewClusterManagerFromCluster(*cluster, hetznerProvider, AppConf.SSHClient, coordinator)
err := AppConf.SSHClient.(*clustermanager.SSHCommunicator).CapturePassphrase(sshKeyName)
if err != nil {
log.Fatal(err)
2 changes: 1 addition & 1 deletion cmd/cluster_addon.go
@@ -55,7 +55,7 @@ func validateAddonSubCommand(cmd *cobra.Command, args []string) error {
return errors.New("exactly one argument expected")
}
addonName := args[0]
provider, _ := hetzner.ProviderAndManager(AppConf.Context, *cluster, AppConf.Client, AppConf.SSHClient, nil, AppConf.CurrentContext.Token)
provider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, *cluster, AppConf.CurrentContext.Token)
addonService := addons.NewClusterAddonService(provider, AppConf.SSHClient)
if !addonService.AddonExists(addonName) {
return fmt.Errorf("addon %s not found", addonName)
2 changes: 1 addition & 1 deletion cmd/cluster_addon_install.go
@@ -35,7 +35,7 @@ var clusterAddonInstallCmd = &cobra.Command{
_, cluster := AppConf.Config.FindClusterByName(name)

log.Printf("installing addon %s", addonName)
provider, _ := hetzner.ProviderAndManager(AppConf.Context, *cluster, AppConf.Client, AppConf.SSHClient, nil, AppConf.CurrentContext.Token)
provider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, *cluster, AppConf.CurrentContext.Token)
addonService := addons.NewClusterAddonService(provider, AppConf.SSHClient)
masterNode, err := provider.GetMasterNode()
FatalOnError(err)
2 changes: 1 addition & 1 deletion cmd/cluster_addon_list.go
@@ -36,7 +36,7 @@ var clusterAddonListCmd = &cobra.Command{
fmt.Fprintln(tw, "NAME\tREQUIRES\tDESCRIPTION\tURL")

cluster := &clustermanager.Cluster{Nodes: []clustermanager.Node{clustermanager.Node{IsMaster: true}}}
provider, _ := hetzner.ProviderAndManager(AppConf.Context, *cluster, AppConf.Client, AppConf.SSHClient, nil, AppConf.CurrentContext.Token)
provider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, *cluster, AppConf.CurrentContext.Token)
addonService := addons.NewClusterAddonService(provider, AppConf.SSHClient)
for _, addon := range addonService.Addons() {
requires := "-"
2 changes: 1 addition & 1 deletion cmd/cluster_addon_uninstall.go
@@ -35,7 +35,7 @@ var clusterAddonUninstallCmd = &cobra.Command{
_, cluster := AppConf.Config.FindClusterByName(name)

log.Printf("removing addon %s", addonName)
provider, _ := hetzner.ProviderAndManager(AppConf.Context, *cluster, AppConf.Client, AppConf.SSHClient, nil, AppConf.CurrentContext.Token)
provider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, *cluster, AppConf.CurrentContext.Token)
masterNode, err := provider.GetMasterNode()
FatalOnError(err)

14 changes: 7 additions & 7 deletions cmd/cluster_create.go
@@ -79,18 +79,18 @@ func RunClusterCreate(cmd *cobra.Command, args []string) {
workerServerType, _ := cmd.Flags().GetString("worker-server-type")
datacenters, _ := cmd.Flags().GetStringSlice("datacenters")
nodeCidr, _ := cmd.Flags().GetString("node-cidr")
cloudInit, _ := cmd.Flags().GetString("cloud-init")

hetznerProvider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, clustermanager.Cluster{
Name: clusterName,
NodeCIDR: nodeCidr,
CloudInitFile: cloudInit,
}, AppConf.CurrentContext.Token)

hetznerProvider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, AppConf.CurrentContext.Token)
hetznerProvider.InitCluster(clusterName, nodeCidr)
sshClient := clustermanager.NewSSHCommunicator(AppConf.Config.SSHKeys)
err := sshClient.(*clustermanager.SSHCommunicator).CapturePassphrase(sshKeyName)
FatalOnError(err)

var cloudInit string
if cloudInit, _ = cmd.Flags().GetString("cloud-init"); cloudInit != "" {
hetznerProvider.SetCloudInitFile(cloudInit)
}

if haEnabled && isolatedEtcd {
if _, err := hetznerProvider.CreateEtcdNodes(sshKeyName, masterServerType, datacenters, etcdCount); err != nil {
log.Println(err)
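A related simplification in RunClusterCreate above: the --cloud-init flag is now read once alongside the other flags and travels inside the clustermanager.Cluster literal handed to NewHetznerProvider, replacing the old pattern of building the provider first and patching it afterwards through InitCluster and SetCloudInitFile.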
3 changes: 2 additions & 1 deletion cmd/cluster_create_test.go
@@ -1,8 +1,9 @@
package cmd

import (
"github.com/xetys/hetzner-kube/pkg/clustermanager"
"testing"

"github.com/xetys/hetzner-kube/pkg/clustermanager"
)

func TestClusterCmdValidate(t *testing.T) {
2 changes: 1 addition & 1 deletion cmd/cluster_kubeconfig.go
@@ -46,7 +46,7 @@ Example 4: hetzner-kube cluster kubeconfig -n my-cluster -p > my-conf.yaml # pri
name := args[0]
_, cluster := AppConf.Config.FindClusterByName(name)

provider, _ := hetzner.ProviderAndManager(AppConf.Context, *cluster, AppConf.Client, AppConf.SSHClient, nil, AppConf.CurrentContext.Token)
provider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, *cluster, AppConf.CurrentContext.Token)
masterNode, err := provider.GetMasterNode()
FatalOnError(err)

30 changes: 0 additions & 30 deletions cmd/config.go
@@ -11,9 +11,7 @@ import (
"os/user"
"path/filepath"

"github.com/go-kit/kit/log/term"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/thcyron/uiprogress"
"github.com/xetys/hetzner-kube/pkg/clustermanager"
)

@@ -180,34 +178,6 @@ func (app *AppConfig) DeleteContextByName(name string) error {
return fmt.Errorf("context '%s' not found", name)
}

// ActionProgress (deprecated)
func (app *AppConfig) ActionProgress(ctx context.Context, action *hcloud.Action) error {
errCh, progressCh := waitAction(ctx, app.Client, action)

if term.IsTerminal(os.Stdout) {
progress := uiprogress.New()

progress.Start()
bar := progress.AddBar(100).AppendCompleted().PrependElapsed()
bar.Empty = ' '

for {
select {
case err := <-errCh:
if err == nil {
bar.Set(100)
}
progress.Stop()
return err
case p := <-progressCh:
bar.Set(p)
}
}
} else {
return <-errCh
}
}

func (app *AppConfig) assertActiveContext() error {
if app.CurrentContext == nil {
return errors.New("no context selected")
86 changes: 0 additions & 86 deletions cmd/util.go
@@ -1,105 +1,19 @@
package cmd

import (
"context"
"fmt"
"log"
"time"

"github.com/Pallinder/go-randomdata"
"github.com/hetznercloud/hcloud-go/hcloud"
)

var sshPassPhrases = make(map[string][]byte)

func waitAction(ctx context.Context, client *hcloud.Client, action *hcloud.Action) (<-chan error, <-chan int) {
errCh := make(chan error, 1)
progressCh := make(chan int)

go func() {
defer close(errCh)
defer close(progressCh)

ticker := time.NewTicker(100 * time.Millisecond)

sendProgress := func(p int) {
select {
case progressCh <- p:
break
default:
break
}
}

for {
select {
case <-ctx.Done():
errCh <- ctx.Err()
return
case <-ticker.C:
break
}

action, _, err := client.Action.GetByID(ctx, action.ID)
if err != nil {
errCh <- ctx.Err()
return
}

switch action.Status {
case hcloud.ActionStatusRunning:
sendProgress(action.Progress)
break
case hcloud.ActionStatusSuccess:
sendProgress(100)
errCh <- nil
return
case hcloud.ActionStatusError:
errCh <- action.Error()
return
}
}
}()

return errCh, progressCh
}

func randomName() string {
return fmt.Sprintf("%s-%s%s", randomdata.Adjective(), randomdata.Noun(), randomdata.Adjective())
}

//Index find the index of an element int the array
func Index(vs []string, t string) int {
for i, v := range vs {
if v == t {
return i
}
}
return -1
}

//Include indicate if a string is in the strinc array
func Include(vs []string, t string) bool {
return Index(vs, t) >= 0
}

//FatalOnError is an helper function to transform error to fatl
func FatalOnError(err error) {
if err != nil {
log.Fatal(err)
}
}

func waitOrError(tc chan bool, ec chan error, numProcPtr *int) error {
numProcs := *numProcPtr
for numProcs > 0 {
select {
case err := <-ec:
return err
case <-tc:
numProcs--
}
}

return nil
}
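The helpers removed from cmd/util.go are not gone from the project: cluster.go below still calls waitOrError, and the addon commands still use FatalOnError, so they have evidently been relocated, presumably into the packages that consume them. For readers unfamiliar with the pattern, here is a self-contained toy showing waitOrError in use; the helper body mirrors the one shown above, while the main function is illustrative only:

```go
package main

import "fmt"

// waitOrError blocks until numProcs workers signal success on tc, or returns
// the first error received on ec.
func waitOrError(tc chan bool, ec chan error, numProcPtr *int) error {
	numProcs := *numProcPtr
	for numProcs > 0 {
		select {
		case err := <-ec:
			return err
		case <-tc:
			numProcs--
		}
	}
	return nil
}

func main() {
	trueChan := make(chan bool)
	errChan := make(chan error)
	numProcs := 3
	for i := 0; i < numProcs; i++ {
		go func() { trueChan <- true }() // each worker reports success
	}
	fmt.Println(waitOrError(trueChan, errChan, &numProcs)) // prints <nil>
}
```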
39 changes: 19 additions & 20 deletions pkg/clustermanager/cluster.go
@@ -19,7 +19,6 @@ type Manager struct {
eventService EventService
nodeCommunicator NodeCommunicator
clusterProvider ClusterProvider
wait bool
}

//NewClusterManager create a new manager for the cluster
@@ -102,7 +101,10 @@ func (manager *Manager) ProvisionNodes(nodes []Node) error {
func (manager *Manager) SetupEncryptedNetwork() error {
nodes := manager.nodes
// render a public/private key pair
keyPairs := manager.GenerateKeyPairs(nodes[0], len(nodes))
keyPairs, err := manager.GenerateKeyPairs(nodes[0], len(nodes))
if err != nil {
return fmt.Errorf("unable to setup encrypted network: %v", err)
}

for i, keyPair := range keyPairs {
manager.nodes[i].WireGuardKeyPair = keyPair
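GenerateKeyPairs now returns an error instead of silently producing key pairs, and SetupEncryptedNetwork wraps the failure with context. A sketch of what the presumed new signature looks like; only the shape is dictated by the call above, while the body, the command and the field names are assumptions:

```go
// Sketch, not the repository's actual implementation.
func (manager *Manager) GenerateKeyPairs(node Node, count int) ([]WireGuardKeyPair, error) {
	pairs := make([]WireGuardKeyPair, 0, count)
	for i := 0; i < count; i++ {
		private, err := manager.nodeCommunicator.RunCmd(node, "wg genkey") // command assumed
		if err != nil {
			return nil, err // propagated to the caller instead of dropped
		}
		pairs = append(pairs, WireGuardKeyPair{Private: private}) // field name assumed
	}
	return pairs, nil
}
```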
@@ -135,7 +137,7 @@ func (manager *Manager) SetupEncryptedNetwork() error {
}(node)
}

err := waitOrError(trueChan, errChan, &numProc)
err = waitOrError(trueChan, errChan, &numProc)
if err != nil {
return err
}
@@ -166,7 +168,6 @@ func (manager *Manager) InstallMasters() error {
numMaster := 0

for _, node := range manager.nodes {

if node.IsMaster {
_, err := manager.nodeCommunicator.RunCmd(node, "kubeadm reset")
if err != nil {
@@ -314,21 +315,15 @@ func (manager *Manager) InstallEtcdNodes(nodes []Node) error {

//InstallWorkers installs kubernetes workers to given nodes
func (manager *Manager) InstallWorkers(nodes []Node) error {
var joinCommand string
node, err := manager.clusterProvider.GetMasterNode()
if err != nil {
return err
}

// create join command
for _, node := range manager.nodes {
if node.IsMaster {
for tries := 0; ; tries++ {
output, err := manager.nodeCommunicator.RunCmd(node, "kubeadm token create --print-join-command")
if tries < 10 && err != nil {
return err
}
time.Sleep(2 * time.Second)
joinCommand = output
break
}
break
}
joinCommand, err := manager.nodeCommunicator.RunCmd(*node, "kubeadm token create --print-join-command")
if err != nil {
return err
}

errChan := make(chan error)
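Besides being shorter, the InstallWorkers rewrite removes a subtle bug: in the old loop, the unconditional break ended the loop on its first pass, and `if tries < 10 && err != nil` returned the first error anyway, so despite its appearance the code never retried anything. Fetching the master node from the cluster provider and running `kubeadm token create --print-join-command` once does the same job with straightforward error handling.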
@@ -443,8 +438,12 @@ func (manager *Manager) DeployLoadBalancer(nodes []Node) error {
errChan := make(chan error)
trueChan := make(chan bool)
numProcs := 0
masterNodes := manager.clusterProvider.GetMasterNodes()
masterIps := strings.Join(Nodes2IPs(masterNodes), " ")
masterNodesIP := []string{}
for _, node := range manager.clusterProvider.GetMasterNodes() {
masterNodesIP = append(masterNodesIP, node.IPAddress)
}

masterIps := strings.Join(masterNodesIP, " ")
for _, node := range nodes {
if !node.IsMaster && node.IsEtcd {
continue