diff --git a/cmd/cluster_create.go b/cmd/cluster_create.go index 4f5bb003..73cbee4d 100644 --- a/cmd/cluster_create.go +++ b/cmd/cluster_create.go @@ -69,6 +69,7 @@ func RunClusterCreate(cmd *cobra.Command, args []string) { datacenters, _ := cmd.Flags().GetStringSlice("datacenters") nodeCidr, _ := cmd.Flags().GetString("node-cidr") cloudInit, _ := cmd.Flags().GetString("cloud-init") + cni, _ := cmd.Flags().GetString("cni") hetznerProvider := hetzner.NewHetznerProvider(AppConf.Context, AppConf.Client, clustermanager.Cluster{ Name: clusterName, @@ -103,7 +104,7 @@ func RunClusterCreate(cmd *cobra.Command, args []string) { coordinator := pkg.NewProgressCoordinator() - clusterManager := clustermanager.NewClusterManager(hetznerProvider, sshClient, coordinator, clusterName, haEnabled, isolatedEtcd, cloudInit) + clusterManager := clustermanager.NewClusterManager(hetznerProvider, sshClient, coordinator, clusterName, haEnabled, isolatedEtcd, cloudInit, cni) cluster := clusterManager.Cluster() saveCluster(&cluster) renderProgressBars(&cluster, coordinator) @@ -194,7 +195,7 @@ func computeMasterSteps(numMaster int, cluster *clustermanager.Cluster) int { func validateClusterCreateFlags(cmd *cobra.Command, args []string) error { var ( - sshKey, masterServerType, workerServerType, cloudInit string + sshKey, masterServerType, workerServerType, cloudInit, cni string ) if sshKey, _ = cmd.Flags().GetString("ssh-key"); sshKey == "" { @@ -223,6 +224,10 @@ func validateClusterCreateFlags(cmd *cobra.Command, args []string) error { } } + if cni, _ = cmd.Flags().GetString("cni"); cni != "canal" && cni != "calico" { + return errors.New("flag --cni only allows canal or calico") + } + if _, err := AppConf.Config.FindSSHKeyByName(sshKey); err != nil { return fmt.Errorf("SSH key '%s' not found", sshKey) } @@ -271,6 +276,7 @@ func init() { clusterCreateCmd.Flags().IntP("worker-count", "w", 1, "Number of worker nodes for the cluster") clusterCreateCmd.Flags().StringP("cloud-init", "", "", 
"Cloud-init file for server preconfiguration") clusterCreateCmd.Flags().StringP("node-cidr", "", "10.0.1.0/24", "the CIDR for the nodes wireguard IPs") + clusterCreateCmd.Flags().StringP("cni", "", "canal", "The CNI you want to use") // get default datacenters dcs := []string{} diff --git a/cmd/cluster_phase.go b/cmd/cluster_phase.go index 0bbc9621..5a407293 100644 --- a/cmd/cluster_phase.go +++ b/cmd/cluster_phase.go @@ -52,6 +52,7 @@ func getCommonPhaseDependencies(steps int, cmd *cobra.Command, args []string) (c cluster.HaEnabled, cluster.IsolatedEtcd, cluster.CloudInitFile, + cluster.Cni, ) return provider, clusterManager, coordinator diff --git a/cmd/cluster_phase_install_masters.go b/cmd/cluster_phase_install_masters.go index 07d50105..cba70c1a 100644 --- a/cmd/cluster_phase_install_masters.go +++ b/cmd/cluster_phase_install_masters.go @@ -54,6 +54,7 @@ var installMastersPhaseCommand = &cobra.Command{ cluster.HaEnabled, cluster.IsolatedEtcd, cluster.CloudInitFile, + cluster.Cni, ) phase := phases.NewInstallMastersPhase(clusterManager, phaseOptions) diff --git a/cmd/cluster_phase_install_workers.go b/cmd/cluster_phase_install_workers.go index 042bc924..9fd30d92 100644 --- a/cmd/cluster_phase_install_workers.go +++ b/cmd/cluster_phase_install_workers.go @@ -44,6 +44,7 @@ var installWorkersCommand = &cobra.Command{ cluster.HaEnabled, cluster.IsolatedEtcd, cluster.CloudInitFile, + cluster.Cni, ) phase := phases2.NewInstallWorkersPhase(clusterManager) diff --git a/cmd/cluster_phase_setup_ha.go b/cmd/cluster_phase_setup_ha.go index 3573819d..7f5175ab 100644 --- a/cmd/cluster_phase_setup_ha.go +++ b/cmd/cluster_phase_setup_ha.go @@ -46,6 +46,7 @@ var setupHAPhaseCommand = &cobra.Command{ cluster.HaEnabled, cluster.IsolatedEtcd, cluster.CloudInitFile, + cluster.Cni, ) phase := phases.NewSetupHighAvailabilityPhase(clusterManager) diff --git a/docs/cluster-create.md b/docs/cluster-create.md index 0dde230a..88b3a839 100644 --- a/docs/cluster-create.md +++ 
b/docs/cluster-create.md @@ -38,3 +38,4 @@ The following custom options are available for the cluster create command: - `--worker-count`,`-w`: Number of worker nodes for the cluster , *default: 1* - `--cloud-init`: Cloud-init file for server preconfiguration - `--datacenters`: Can be used to filter datacenters by their name, *options: fsn-dc8, nbg1-dc3, hel1-dc2, fsn1-dc14* +- `--cni string`: The CNI you want to use, *default: canal*, *options: canal, calico* diff --git a/pkg/clustermanager/cluster.go b/pkg/clustermanager/cluster.go index f7ba2eca..a98160fb 100644 --- a/pkg/clustermanager/cluster.go +++ b/pkg/clustermanager/cluster.go @@ -15,6 +15,7 @@ type Manager struct { nodes []Node clusterName string cloudInitFile string + cni string eventService EventService nodeCommunicator NodeCommunicator clusterProvider ClusterProvider @@ -36,7 +37,7 @@ const ( ) // NewClusterManager create a new manager for the cluster -func NewClusterManager(provider ClusterProvider, nodeCommunicator NodeCommunicator, eventService EventService, name string, haEnabled bool, isolatedEtcd bool, cloudInitFile string) *Manager { +func NewClusterManager(provider ClusterProvider, nodeCommunicator NodeCommunicator, eventService EventService, name string, haEnabled bool, isolatedEtcd bool, cloudInitFile string, cni string) *Manager { manager := &Manager{ clusterName: name, haEnabled: haEnabled, @@ -45,6 +46,7 @@ func NewClusterManager(provider ClusterProvider, nodeCommunicator NodeCommunicat eventService: eventService, nodeCommunicator: nodeCommunicator, clusterProvider: provider, + cni: cni, nodes: provider.GetAllNodes(), } @@ -75,6 +77,7 @@ func (manager *Manager) Cluster() Cluster { CloudInitFile: manager.cloudInitFile, NodeCIDR: manager.clusterProvider.GetNodeCidr(), KubernetesVersion: "1.16.4", + Cni: manager.cni, } } @@ -171,7 +174,14 @@ func (manager *Manager) InstallMasters(keepCerts KeepCerts) error { commands := []NodeCommand{ {"kubeadm init", "kubectl version > /dev/null &> /dev/null 
"|| kubeadm init --ignore-preflight-errors=all --config /root/master-config.yaml"}, {"configure kubectl", "rm -rf $HOME/.kube && mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config"}, - {"install canal", "kubectl apply -f https://docs.projectcalico.org/v3.10/manifests/canal.yaml"}, + } + + if manager.cni == "canal" { + commands = append(commands, NodeCommand{"install canal", "kubectl apply -f https://docs.projectcalico.org/v3.10/manifests/canal.yaml"}) + } else if manager.cni == "calico" { + // We had to change the Pod CIDR to match the one defined in the kubeadm config file + // We had to tweak MTU for our install with Wireguard and the overhead it needs. Calculation is 1500 (eth0) - 80 (wireguard) - 20 (Calico IPinIP) + commands = append(commands, NodeCommand{"install calico", "cd /tmp && curl https://docs.projectcalico.org/v3.11/manifests/calico.yaml -O && sed -i -e \"s?192.168.0.0/16?10.244.0.0/16?g\" calico.yaml && sed -i -e \"s?1440?1400?g\" calico.yaml && kubectl apply -f calico.yaml"}) } // inject custom commands diff --git a/pkg/clustermanager/types.go b/pkg/clustermanager/types.go index 7582b0f6..ab699960 100644 --- a/pkg/clustermanager/types.go +++ b/pkg/clustermanager/types.go @@ -21,6 +21,7 @@ type Cluster struct { CloudInitFile string `json:"cloud_init_file"` NodeCIDR string `json:"node_cidr"` KubernetesVersion string `json:"kubernetes_version"` + Cni string `json:"cni"` } // NodeCommand is the structure used to define acommand to execute on a node