diff --git a/contrib/tectonic-cli-examples/README.md b/contrib/tectonic-cli-examples/README.md new file mode 100644 index 0000000000..ab0250ce46 --- /dev/null +++ b/contrib/tectonic-cli-examples/README.md @@ -0,0 +1,4 @@ +# Running + +`tectonic init --config=contrib/tectonic-cli-examples/config.yml` +`tectonic install assets --dir=Example` diff --git a/contrib/tectonic-cli-examples/config.yml b/contrib/tectonic-cli-examples/config.yml new file mode 100644 index 0000000000..c92e91286e --- /dev/null +++ b/contrib/tectonic-cli-examples/config.yml @@ -0,0 +1,364 @@ +Clusters: + # The name of the cluster. + # If used in a cloud-environment, this will be prepended to `tectonic_base_domain` resulting in the URL to the Tectonic console. + # + # Note: This field MUST be set manually prior to creating the cluster. + # Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. + - Name: Example + Platform: AWS + Networking: + # (optional) Configures the network to be used in Tectonic. One of the following values can be used: + # + # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. + # + # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. + # + # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. + # + # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. + Type: canal + + MTU: 1234 + + # (optional) This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. + # The maximum size of this IP range is /12 + # NodeCIDR: 10.3.0.0/16 + + # (optional) This declares the IP range to assign Kubernetes pod IPs in CIDR notation. + # PodCIDR: 10.2.0.0/16 + Masters: + # The number of master nodes to be created. + # This applies only to cloud platforms. + NodeCount: 2 + MachineType: foo + Workers: + # The number of worker nodes to be created. + # This applies only to cloud platforms. + NodeCount: 3 + MachineType: bar + Etcd: + # The number of etcd nodes to be created. + # If set to zero, the count of etcd nodes will be determined automatically. + # + # Note: This is not supported on bare metal. + NodeCount: 3 + MachineType: baz + + # (optional) List of external etcd v3 servers to connect with (hostnames/IPs only). + # Needs to be set if using an external etcd cluster. + # Note: If this variable is defined, the installer will not create self-signed certs. + # To provide a CA certificate to trust the etcd servers, set "tectonic_etcd_ca_cert_path". + # + # Example: `["etcd1", "etcd2", "etcd3"]` + # ExternalServers: + + ExternalTLSMaterials: + # Validity period of the self-signed certificates (in hours). + # Default is 3 years. + # This setting is ignored if user provided certificates are used. + ValidityPeriod: 26280 + + # (optional) The path of the file containing the CA certificate for TLS communication with etcd. + # + # Note: This works only when used in conjunction with an external etcd cluster. + # If set, the variable `tectonic_etcd_servers` must also be set. + # EtcdCACertPath: /dev/null + Tectonic: + # The path the pull secret file in JSON format. 
+ # This is known to be a "Docker pull secret" as produced by the docker login [1] command. + # A sample JSON content is shown in [2]. + # You can download the pull secret from your Account overview page at [3]. + # + # [1] https://docs.docker.com/engine/reference/commandline/login/ + # + # [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup + # + # [3] https://account.coreos.com/overview + PullSecretPath: + + # The path to the tectonic licence file. + # You can download the Tectonic license file from your Account overview page at [1]. + # + # [1] https://account.coreos.com/overview + LicensePath: /Users/dspangenberg/.tectonic/tectonic-license.txt + DNS: + # The base DNS domain of the cluster. It must NOT contain a trailing period. Some + # DNS providers will automatically add this if necessary. + # + # Example: `openstack.dev.coreos.systems`. + # + # Note: This field MUST be set manually prior to creating the cluster. + # This applies only to cloud platforms. + # + # [Azure-specific NOTE] + # To use Azure-provided DNS, `tectonic_base_domain` should be set to `""` + # If using DNS records, ensure that `tectonic_base_domain` is set to a properly configured external DNS zone. + # Instructions for configuring delegated domains for Azure DNS can be found here: https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns + BaseDomain: foo.bar.baz + ContainerLinux: + # (optional) The Container Linux update channel. + # + # Examples: `stable`, `beta`, `alpha` + # tectonic_container_linux_channel = "stable" + Channel: stable + + # The Container Linux version to use. Set to `latest` to select the latest available version for the selected update channel. + # + # Examples: `latest`, `1465.6.0` + Version: latest + Update: + Server: foo.bar + Channel: stable + AppID: 42-42-0 + Console: + AdminEmail: der@admin.com + AdminPassword: foobarbaz + # --- + GovCloud: + Profile: default + AssetsS3BucketName: bla + DNSServerIP: 1234 + + + + + # (optional) Extra AWS tags to be applied to created autoscaling group resources. + # This is a list of maps having the keys `key`, `value` and `propagate_at_launch`. + # + # Example: `[ { key = "foo", value = "bar", propagate_at_launch = true } ]` + # Tectonic: + # AutoScalingGroupExtraTags: + + + + + + AWS: + # (optional) Unique name under which the Amazon S3 bucket will be created. Bucket name must start with a lower case name and is limited to 63 characters. + # The Tectonic Installer uses the bucket to store tectonic assets and kubeconfig. + # If name is not provided the installer will construct the name using "Name", current AWS region and "BaseDomain" + # AssetsS3BucketName: + + # (optional) Extra AWS tags to be applied to created resources. + # + # Example: `{ "key" = "value", "foo" = "bar" }` + # ExtraTags: + + # (optional) If set to true, create private-facing ingress resources (ELB, A-records). + # If set to false, no private-facing ingress resources will be provisioned and all DNS records will be created in the public Route53 zone. + # PrivateEndpoints: true + + # (optional) This declares the AWS credentials profile to use. + # Profile: default + + # (optional) If set to true, create public-facing ingress resources (ELB, A-records). + # If set to false, no public-facing ingress resources will be created. + # PublicEndpoints: true + + # The target AWS region for the cluster. + Region: eu-west-1 + + # Name of an SSH key located within the AWS region. Example: coreos-user. 
+ SSHKey: + + # Block of IP addresses used by the VPC. + # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect. + VPCCIDRBlock: 10.0.0.0/16 + + External: + # (optional) List of subnet IDs within an existing VPC to deploy master nodes into. + # Required to use an existing VPC and the list must match the AZ count. + # + # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` + # MasterSubnetIDs: + + # (optional) If set, the given Route53 zone ID will be used as the internal (private) zone. + # This zone will be used to create etcd DNS records as well as internal API and internal Ingress records. + # If set, no additional private zone will be created. + # + # Example: `"Z1ILINNUJGTAO1"` + # PrivateZone: + + # (optional) ID of an existing VPC to launch nodes into. + # If unset a new VPC is created. + # + # Example: `vpc-123456` + # VPCID: + + # (optional) List of subnet IDs within an existing VPC to deploy worker nodes into. + # Required to use an existing VPC and the list must match the AZ count. + # + # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` + # WorkerSubnetIDs: + + Etcd: + # (optional) List of additional security group IDs for etcd nodes. + # + # Example: `["sg-51530134", "sg-b253d7cc"]` + # ExtraSGIDs: + + # (optional) Name of IAM role to use for the instance profiles of etcd nodes. + # The name is also the last part of a role's ARN. + # + # Example: + # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer + # * Role Name = tectonic-installer + # IAMRoleName: + + # Instance size for the etcd node(s). Example: `t2.medium`. Read the [etcd recommended hardware](https://coreos.com/etcd/docs/latest/op-guide/hardware.html) guide for best performance + EC2Type: t2.medium + + RootVolume: + # The amount of provisioned IOPS for the root block device of etcd nodes. + # Ignored if the volume type is not io1. + IOPS: 100 + + # The size of the volume in gigabytes for the root block device of etcd nodes. + Size: 30 + + # The type of volume for the root block device of etcd nodes. + Type: gp2 + + Master: + # (optional) This configures master availability zones and their corresponding subnet CIDRs directly. + # + # Example: + # `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }` + # CustomSubnets: + + # Instance size for the master node(s). Example: `t2.medium`. + EC2Type: t2.medium + + # (optional) List of additional security group IDs for master nodes. + # + # Example: `["sg-51530134", "sg-b253d7cc"]` + # ExtraSGIDs: + + # (optional) Name of IAM role to use for the instance profiles of master nodes. + # The name is also the last part of a role's ARN. + # + # Example: + # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer + # * Role Name = tectonic-installer + # IAMRoleName: + + RootVolume: + # The amount of provisioned IOPS for the root block device of master nodes. + # Ignored if the volume type is not io1. + IOPS: 100 + + # The size of the volume in gigabytes for the root block device of master nodes. + Size: 30 + + # The type of volume for the root block device of master nodes. + Type: gp2 + + Worker: + # (optional) This configures worker availability zones and their corresponding subnet CIDRs directly. + # + # Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }` + # CustomSubnets: + + # Instance size for the worker node(s). Example: `t2.medium`. + EC2Type: t2.medium + + # (optional) List of additional security group IDs for worker nodes. 
+ # + # Example: `["sg-51530134", "sg-b253d7cc"]` + # ExtraSGIDs: + + # (optional) Name of IAM role to use for the instance profiles of worker nodes. + # The name is also the last part of a role's ARN. + # + # Example: + # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer + # * Role Name = tectonic-installer + # IAMRoleName: + + # (optional) List of ELBs to attach all worker instances to. + # This is useful for exposing NodePort services via load-balancers managed separately from the cluster. + # + # Example: + # * `["ingress-nginx"]` + # LoadBalancers: + + RootVolume: + # The amount of provisioned IOPS for the root block device of worker nodes. + # Ignored if the volume type is not io1. + IOPS: 100 + + # The size of the volume in gigabytes for the root block device of worker nodes. + Size: 30 + + # The type of volume for the root block device of worker nodes. + Type: gp2 + + + # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. + # If left blank, a CA certificate will be automatically generated. + # tectonic_ca_cert = "" + + # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. + # This field is mandatory if `tectonic_ca_cert` is set. + # tectonic_ca_key = "" + + # (optional) The algorithm used to generate tectonic_ca_key. + # The default value is currently recommended. + # This field is mandatory if `tectonic_ca_cert` is set. + # tectonic_ca_key_alg = "RSA" + + # (optional) A list of PEM encoded CA files that will be installed in /etc/ssl/certs on etcd, master, and worker nodes. + # tectonic_custom_ca_pem_list = "" + + # (optional) This only applies if you use the modules/dns/ddns module. + # + # Specifies the RFC2136 Dynamic DNS server key algorithm. + # tectonic_ddns_key_algorithm = "" + + # (optional) This only applies if you use the modules/dns/ddns module. + # + # Specifies the RFC2136 Dynamic DNS server key name. + # tectonic_ddns_key_name = "" + + # (optional) This only applies if you use the modules/dns/ddns module. + # + # Specifies the RFC2136 Dynamic DNS server key secret. + # tectonic_ddns_key_secret = "" + + # (optional) This only applies if you use the modules/dns/ddns module. + # + # Specifies the RFC2136 Dynamic DNS server IP/host to register IP addresses to. + # tectonic_ddns_server = "" + + # (optional) DNS prefix used to construct the console and API server endpoints. + # tectonic_dns_name = "" + + # (optional) The path of the file containing the client certificate for TLS communication with etcd. + # + # Note: This works only when used in conjunction with an external etcd cluster. + # If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_key_path` must also be set. + # tectonic_etcd_client_cert_path = "/dev/null" + + # (optional) The path of the file containing the client key for TLS communication with etcd. + # + # Note: This works only when used in conjunction with an external etcd cluster. + # If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_cert_path` must also be set. + # tectonic_etcd_client_key_path = "/dev/null" + + # (optional) HTTP proxy address. + # + # Example: `http://myproxy.example.com` + # tectonic_http_proxy_address = "" + + # (optional) HTTPS proxy address. + # + # Example: `http://myproxy.example.com` + # tectonic_https_proxy_address = "" + + # (optional) Start iscsid.service to enable iscsi volume attachment. 
+ # tectonic_iscsi_enabled = "false" + + # (optional) List of local endpoints that will not use HTTP proxy. + # + # Example: `["127.0.0.1","localhost",".example.com","10.3.0.1"]` + # tectonic_no_proxy = "" diff --git a/installer/cmd/tectonic/main.go b/installer/cmd/tectonic/main.go index 4e41728f1f..a7ac29d840 100644 --- a/installer/cmd/tectonic/main.go +++ b/installer/cmd/tectonic/main.go @@ -2,60 +2,47 @@ package main import ( "log" + "os" "github.com/coreos/tectonic-installer/installer/pkg/workflow" "gopkg.in/alecthomas/kingpin.v2" ) var ( - dryRunFlag = kingpin.Flag("dry-run", "Just pretend, but don't do anything").Bool() - clusterInstallCommand = kingpin.Command("install", "Create a new Tectonic cluster") - clusterFullInstallCommand = clusterInstallCommand.Command("full", "Create a new Tectonic cluster").Default() - clusterAssetsCommand = clusterInstallCommand.Command("assets", "Generate Tectonic assets.") - clusterBootstrapCommand = clusterInstallCommand.Command("bootstrap", "Create a single bootstrap node Tectonic cluster.") - clusterJoinCommand = clusterInstallCommand.Command("join", "Create master and worker nodes to join an exisiting Tectonic cluster.") - clusterDeleteCommand = kingpin.Command("delete", "Delete an existing Tectonic cluster") - deleteClusterDir = clusterDeleteCommand.Arg("dir", "The name of the cluster to delete").String() - clusterConfigFlag = clusterInstallCommand.Flag("config", "Cluster specification file").Required().ExistingFile() + clusterInitCommand = kingpin.Command("init", "Initialize a new Tectonic cluster") + clusterInitConfigFlag = clusterInitCommand.Flag("config", "Cluster specification file").Required().ExistingFile() + + clusterInstallCommand = kingpin.Command("install", "Create a new Tectonic cluster") + clusterInstallAssetsCommand = clusterInstallCommand.Command("assets", "Generate Tectonic assets.") + clusterInstallBootstrapCommand = clusterInstallCommand.Command("bootstrap", "Create a single bootstrap node Tectonic cluster.") + clusterInstallFullCommand = clusterInstallCommand.Command("full", "Create a new Tectonic cluster").Default() + clusterInstallJoinCommand = clusterInstallCommand.Command("join", "Create master and worker nodes to join an exisiting Tectonic cluster.") + clusterInstallDirFlag = clusterInstallCommand.Flag("dir", "Cluster directory").Default(".").ExistingDir() + + clusterDestroyCommand = kingpin.Command("destroy", "Destroy an existing Tectonic cluster") + clusterDestroyDirFlag = clusterDestroyCommand.Arg("dir", "Cluster directory").Default(".").ExistingDir() ) func main() { - // TODO: actually do proper error handling + var w workflow.Workflow + switch kingpin.Parse() { - case clusterFullInstallCommand.FullCommand(): - { - w := workflow.NewInstallWorkflow(*clusterConfigFlag) - if err := w.Execute(); err != nil { - log.Fatal(err) - } - } - case clusterAssetsCommand.FullCommand(): - { - w := workflow.NewAssetsWorkflow(*clusterConfigFlag) - if err := w.Execute(); err != nil { - log.Fatal(err) - } - } - case clusterBootstrapCommand.FullCommand(): - { - w := workflow.NewBootstrapWorkflow(*clusterConfigFlag) - if err := w.Execute(); err != nil { - log.Fatal(err) - } - } - case clusterJoinCommand.FullCommand(): - { - w := workflow.NewJoinWorkflow(*clusterConfigFlag) - if err := w.Execute(); err != nil { - log.Fatal(err) - } - } - case clusterDeleteCommand.FullCommand(): - { - w := workflow.NewDestroyWorkflow(*deleteClusterDir) - if err := w.Execute(); err != nil { - log.Fatal(err) - } - } + case clusterInitCommand.FullCommand(): + w 
= workflow.NewInitWorkflow(*clusterInitConfigFlag) + case clusterInstallFullCommand.FullCommand(): + w = workflow.NewInstallFullWorkflow(*clusterInstallDirFlag) + case clusterInstallAssetsCommand.FullCommand(): + w = workflow.NewInstallAssetsWorkflow(*clusterInstallDirFlag) + case clusterInstallBootstrapCommand.FullCommand(): + w = workflow.NewInstallBootstrapWorkflow(*clusterInstallDirFlag) + case clusterInstallJoinCommand.FullCommand(): + w = workflow.NewInstallJoinWorkflow(*clusterInstallDirFlag) + case clusterDestroyCommand.FullCommand(): + w = workflow.NewDestroyWorkflow(*clusterDestroyDirFlag) + } + + if err := w.Execute(); err != nil { + log.Fatal(err) + os.Exit(1) } } diff --git a/installer/pkg/tectonic/BUILD.bazel b/installer/pkg/tectonic/BUILD.bazel deleted file mode 100644 index 025b376fc5..0000000000 --- a/installer/pkg/tectonic/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "buildstate.go", - ], - importpath = "github.com/coreos/tectonic-installer/installer/pkg/tectonic", - visibility = ["//installer:__subpackages__"], - deps = [ - "//installer/pkg/config:go_default_library", - "//installer/pkg/config-generator:go_default_library", - "//installer/pkg/terraform-generator:go_default_library", - ], -) diff --git a/installer/pkg/tectonic/buildstate.go b/installer/pkg/tectonic/buildstate.go deleted file mode 100644 index 87687dff9b..0000000000 --- a/installer/pkg/tectonic/buildstate.go +++ /dev/null @@ -1,102 +0,0 @@ -package tectonic - -import ( - "bufio" - "fmt" - "log" - "os" - "path/filepath" - "strings" - - "github.com/coreos/tectonic-installer/installer/pkg/config" - "github.com/coreos/tectonic-installer/installer/pkg/config-generator" - "github.com/coreos/tectonic-installer/installer/pkg/terraform-generator" -) - -const ( - kubeSystemFileName = "kube-system.yml" - tectonicSystemFileName = "tectonic-system.yml" -) - -// NewBuildLocation creates a new directory on disk that will become -// the root location for all statefull artefacts of the current cluster build. -func NewBuildLocation(clusterName string) string { - var err error - pwd, err := os.Getwd() - if err != nil { - log.Fatalf("Failed to get current directory because: %v", err) - } - buildPath := filepath.Join(pwd, clusterName) - err = os.MkdirAll(buildPath, os.ModeDir|0755) - if err != nil { - log.Fatalf("Failed to create build folder at %s", buildPath) - } - return buildPath -} - -// FindTemplatesForType determines the location of top-level -// Terraform templates for a given type (platform) of build. -// TEMPORARY: implement actual detection of templates from released artefacts. -func FindTemplatesForType(buildType string) string { - pwd, _ := os.Getwd() - return filepath.Join(pwd, "platforms", strings.ToLower(buildType)) -} - -// GenerateClusterConfig writes, if successful, the cluster configuration. 
-func GenerateClusterConfig(cluster config.Cluster, configPath string) error { - configGenerator := configgenerator.New(cluster) - - kubeSystem, err := configGenerator.KubeSystem() - if err != nil { - return err - } - - kubeSystemConfigFilePath := filepath.Join(configPath, kubeSystemFileName) - if err := writeFile(kubeSystemConfigFilePath, kubeSystem); err != nil { - return err - } - - tectonicSystem, err := configGenerator.TectonicSystem() - if err != nil { - return err - } - - tectonicSystemConfigFilePath := filepath.Join(configPath, tectonicSystemFileName) - return writeFile(tectonicSystemConfigFilePath, tectonicSystem) -} - -// GenerateTerraformVars writes, if successful, the terraform variables. -func GenerateTerraformVars(cluster config.Cluster, configFilePath string) error { - terraformGenerator := terraformgenerator.New(cluster) - - vars, err := terraformGenerator.TFVars() - if err != nil { - return err - } - - return writeFile(configFilePath, vars) -} - -func writeFile(path, content string) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - w := bufio.NewWriter(f) - if _, err := fmt.Fprintln(w, content); err != nil { - return err - } - w.Flush() - - return nil -} - -// FindTemplatesForStep determines the location of top-level -// Terraform templates for a given step of build. -func FindTemplatesForStep(step ...string) string { - pwd, _ := os.Getwd() - step = append([]string{pwd, "steps"}, step...) - return filepath.Join(step...) -} diff --git a/installer/pkg/workflow/BUILD.bazel b/installer/pkg/workflow/BUILD.bazel index d94ff9035c..0878c91c57 100644 --- a/installer/pkg/workflow/BUILD.bazel +++ b/installer/pkg/workflow/BUILD.bazel @@ -1,19 +1,27 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_test( + name = "go_default_test", + srcs = ["workflow_test.go"], + embed = [":go_default_library"], +) go_library( name = "go_default_library", srcs = [ "destroy.go", + "init.go", "install.go", - "utils.go", "terraform.go", + "utils.go", "workflow.go", ], importpath = "github.com/coreos/tectonic-installer/installer/pkg/workflow", visibility = ["//visibility:public"], deps = [ "//installer/pkg/config:go_default_library", - "//installer/pkg/tectonic:go_default_library", + "//installer/pkg/config-generator:go_default_library", + "//installer/pkg/terraform-generator:go_default_library", "//installer/vendor/k8s.io/client-go/kubernetes:go_default_library", "//installer/vendor/k8s.io/client-go/tools/clientcmd:go_default_library", ], diff --git a/installer/pkg/workflow/destroy.go b/installer/pkg/workflow/destroy.go index c7ce9cf832..e0816b597f 100644 --- a/installer/pkg/workflow/destroy.go +++ b/installer/pkg/workflow/destroy.go @@ -1,71 +1,28 @@ package workflow -import ( - "log" - "os" - - "github.com/coreos/tectonic-installer/installer/pkg/tectonic" -) - // NewDestroyWorkflow creates new instances of the 'destroy' workflow, // responsible for running the actions required to remove resources // of an existing cluster and clean up any remaining artefacts. 
-func NewDestroyWorkflow(buildPath string) Workflow { - pathStat, err := os.Stat(buildPath) - // TODO: add deeper checking of the path for having cluster state - if os.IsNotExist(err) || !pathStat.IsDir() { - log.Fatalf("Provided path %s is not valid cluster state location.", buildPath) - } else if err != nil { - log.Fatalf("%v encountered while validating build location.", err) - } - - // TODO: get this dynamically once we move to cluster config - platform := "aws" - - if platform == "aws" { - return simpleWorkflow{ - metadata: metadata{ - statePath: buildPath, - }, - steps: []Step{ - terraformPrepareStep, - joiningDestroyStep, - bootstrapDestroyStep, - assetsDestroyStep, - }, - } - } - return simpleWorkflow{ - metadata: metadata{ - statePath: buildPath, - }, +func NewDestroyWorkflow(clusterDir string) Workflow { + return Workflow{ + metadata: metadata{clusterDir: clusterDir}, steps: []Step{ - terraformPrepareStep, - terraformInitStep, - terraformDestroyStep, + readClusterConfigStep, + destroyJoinStep, + destroyBootstrapStep, + destroyAssetsStep, }, } } -func terraformDestroyStep(m *metadata) error { - if m.statePath == "" { - log.Fatalf("Invalid build location - cannot destroy.") - } - log.Printf("Destroying cluster from %s...", m.statePath) - return tfDestroy(m.statePath, "state", tectonic.FindTemplatesForType(m.platform)) -} - -func joiningDestroyStep(m *metadata) error { - log.Printf("Destroying cluster from %s...", m.statePath) - return tfDestroy(m.statePath, "joining", tectonic.FindTemplatesForStep("joining")) +func destroyAssetsStep(m *metadata) error { + return tfDestroy(m.clusterDir, assetsStep, findTemplatesForStep(assetsStep)) } -func bootstrapDestroyStep(m *metadata) error { - log.Printf("Destroying cluster from %s...", m.statePath) - return tfDestroy(m.statePath, "bootstrap", tectonic.FindTemplatesForStep("bootstrap")) +func destroyBootstrapStep(m *metadata) error { + return tfDestroy(m.clusterDir, bootstrapStep, findTemplatesForStep(bootstrapStep)) } -func assetsDestroyStep(m *metadata) error { - log.Printf("Destroying cluster from %s...", m.statePath) - return tfDestroy(m.statePath, "assets", tectonic.FindTemplatesForStep("assets")) +func destroyJoinStep(m *metadata) error { + return tfDestroy(m.clusterDir, joinStep, findTemplatesForStep(joinStep)) } diff --git a/installer/pkg/workflow/init.go b/installer/pkg/workflow/init.go new file mode 100644 index 0000000000..2d7d2bf355 --- /dev/null +++ b/installer/pkg/workflow/init.go @@ -0,0 +1,66 @@ +package workflow + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/coreos/tectonic-installer/installer/pkg/terraform-generator" +) + +const ( + kubeSystemPath = "generated/manifests" + kubeSystemFileName = "cluster-config.yaml" + tectonicSystemPath = "generated/tectonic" + tectonicSystemFileName = "cluster-config.yaml" + terraformVariablesFileName = "terraform.tfvars" +) + +// NewInitWorkflow creates new instances of the 'init' workflow, +// responsible for initializing a new cluster. 
+func NewInitWorkflow(configFilePath string) Workflow { + return Workflow{ + metadata: metadata{configFilePath: configFilePath}, + steps: []Step{ + readClusterConfigStep, + prepareWorspaceStep, + generateTerraformVariablesStep, + }, + } +} + +func generateTerraformVariablesStep(m *metadata) error { + terraformGenerator := terraformgenerator.New(m.cluster) + + vars, err := terraformGenerator.TFVars() + if err != nil { + return err + } + + terraformVariablesFilePath := filepath.Join(m.clusterDir, terraformVariablesFileName) + return writeFile(terraformVariablesFilePath, vars) +} + +func prepareWorspaceStep(m *metadata) error { + dir, err := os.Getwd() + if err != nil { + return fmt.Errorf("Failed to get current directory because: %s", err) + } + + m.clusterDir = filepath.Join(dir, m.cluster.Name) + if stat, err := os.Stat(m.clusterDir); err == nil && stat.IsDir() { + return fmt.Errorf("cluster directory already exists at %s", m.clusterDir) + } + + if err := os.MkdirAll(m.clusterDir, os.ModeDir|0755); err != nil { + return fmt.Errorf("Failed to create cluster directory at %s", m.clusterDir) + } + + configFilePath := filepath.Join(m.clusterDir, configFileName) + if err := copyFile(m.configFilePath, configFilePath); err != nil { + return err + } + m.configFilePath = configFilePath + + return nil +} diff --git a/installer/pkg/workflow/install.go b/installer/pkg/workflow/install.go index 19ed82f1c8..9698519334 100644 --- a/installer/pkg/workflow/install.go +++ b/installer/pkg/workflow/install.go @@ -1,190 +1,89 @@ package workflow -import ( - "io" - "log" - "os" - "path/filepath" - - "github.com/coreos/tectonic-installer/installer/pkg/tectonic" -) - -const ( - configFileName = "config.yaml" - terraformVariablesFileName = "terraform.tfvars" - kubeConfig = "/generated/auth/kubeconfig" -) - -// NewInstallWorkflow creates new instances of the 'install' workflow, +// NewInstallFullWorkflow creates new instances of the 'install' workflow, // responsible for running the actions necessary to install a new cluster. -func NewInstallWorkflow(configFile string) Workflow { - - // TODO: move to tectonicGenerateClusterConfig/tectonicGenerateTerraformVariables and get this dynamically - clusterName := "cluster-aws" - platform := "aws" - - if platform == "aws" { - return simpleWorkflow{ - metadata: metadata{ - clusterName: clusterName, - configFile: configFile, - }, - steps: []Step{ - terraformPrepareStep, - assetsStep, - bootstrapStep, - joiningStep, - }, - } - } - return simpleWorkflow{ - metadata: metadata{ - clusterName: clusterName, - configFile: configFile, - platform: platform, - }, +func NewInstallFullWorkflow(clusterDir string) Workflow { + return Workflow{ + metadata: metadata{clusterDir: clusterDir}, steps: []Step{ - terraformPrepareStep, - terraformInitStep, - terraformApplyStep, + readClusterConfigStep, + installAssetsStep, + generateClusterConfigStep, + installBootstrapStep, + installJoinStep, }, } } -//func tectonicGenerateClusterConfig(m *metadata) error { -// return tectonic.GenerateClusterConfig(m.Cluster, m.statePath) -//} -// -//func tectonicGenerateTerraformVariables(m *metadata) error { -// configFilePath := filepath.Join(m.statePath, terraformVariablesFileName) -// -// return tectonic.GenerateTerraformVars(m.Cluster, configFilePath) -//} - -// NewAssetsWorkflow creates new instances of the 'assets' workflow, +// NewInstallAssetsWorkflow creates new instances of the 'assets' workflow, // responsible for running the actions necessary to generate cluster assets. 
-func NewAssetsWorkflow(configFile string) Workflow { - // TODO: move to tectonicGenerateClusterConfig/tectonicGenerateTerraformVariables and get this dynamically - clusterName := "cluster-aws" - return simpleWorkflow{ - metadata: metadata{ - clusterName: clusterName, - configFile: configFile, - }, +func NewInstallAssetsWorkflow(clusterDir string) Workflow { + return Workflow{ + metadata: metadata{clusterDir: clusterDir}, steps: []Step{ - terraformPrepareStep, - assetsStep, + readClusterConfigStep, + installAssetsStep, + generateClusterConfigStep, }, } } -// NewBootstrapWorkflow creates new instances of the 'bootstrap' workflow, +// NewInstallBootstrapWorkflow creates new instances of the 'bootstrap' workflow, // responsible for running the actions necessary to generate a single bootstrap machine cluster. -func NewBootstrapWorkflow(configFile string) Workflow { - // TODO: move to tectonicGenerateClusterConfig/tectonicGenerateTerraformVariables and get this dynamically - clusterName := "cluster-aws" - return simpleWorkflow{ - metadata: metadata{ - clusterName: clusterName, - configFile: configFile, - }, +func NewInstallBootstrapWorkflow(clusterDir string) Workflow { + return Workflow{ + metadata: metadata{clusterDir: clusterDir}, steps: []Step{ - terraformPrepareStep, - bootstrapStep, + readClusterConfigStep, + installBootstrapStep, }, } } -// NewJoinWorkflow creates new instances of the 'join' workflow, +// NewInstallJoinWorkflow creates new instances of the 'join' workflow, // responsible for running the actions necessary to scale the machines of the cluster. -func NewJoinWorkflow(configFile string) Workflow { - // TODO: move to tectonicGenerateClusterConfig/tectonicGenerateTerraformVariables and get this dynamically - clusterName := "cluster-aws" - return simpleWorkflow{ - metadata: metadata{ - clusterName: clusterName, - configFile: configFile, - }, +func NewInstallJoinWorkflow(clusterDir string) Workflow { + return Workflow{ + metadata: metadata{clusterDir: clusterDir}, steps: []Step{ - terraformPrepareStep, - joiningStep, + readClusterConfigStep, + installJoinStep, }, } } -func terraformPrepareStep(m *metadata) error { - if m.statePath == "" { - m.statePath = tectonic.NewBuildLocation(m.clusterName) - } - varfile := filepath.Join(m.statePath, m.configFile) - if _, err := os.Stat(varfile); os.IsNotExist(err) { - from, err := os.Open(m.configFile) - if err != nil { - return err - } - defer from.Close() - to, err := os.OpenFile(varfile, os.O_RDWR|os.O_CREATE, 0666) - if err != nil { - return err - } - defer to.Close() - _, err = io.Copy(to, from) - if err != nil { - return err - } - } - return nil +func installAssetsStep(m *metadata) error { + return runInstallStep(m.clusterDir, assetsStep) } -func terraformInitStep(m *metadata) error { - log.Printf("Initializing cluster ...") - return tfInit(m.statePath, tectonic.FindTemplatesForType(m.platform)) -} - -func terraformApplyStep(m *metadata) error { - log.Printf("Installation is running...") - return tfApply(m.statePath, "state", tectonic.FindTemplatesForType(m.platform)) -} - -func assetsStep(m *metadata) error { - log.Printf("Installation is running...") - return runStep(m.statePath, "assets") -} - -func bootstrapStep(m *metadata) error { - log.Printf("Installation is running...") - err := runStep(m.statePath, "bootstrap") - if err != nil { +func installBootstrapStep(m *metadata) error { + if err := runInstallStep(m.clusterDir, bootstrapStep); err != nil { return err } - err = waitForNcg(m) - if err != nil { + + if err := waitForNCG(m); 
err != nil { return err } - err = destroyCname(m) - if err != nil { + + if err := destroyCNAME(m.clusterDir); err != nil { return err } + return nil } -func joiningStep(m *metadata) error { +func installJoinStep(m *metadata) error { // TODO: import will fail after a first run, error is ignored for now importAutoScalingGroup(m) - log.Printf("Installation is running...") - return runStep(m.statePath, "joining") + + return runInstallStep(m.clusterDir, joinStep) } -func runStep(buildPath string, step string) error { - codePath := tectonic.FindTemplatesForStep(step) - err := tfInit(buildPath, codePath) - if err != nil { +func runInstallStep(clusterDir, step string) error { + templateDir := findTemplatesForStep(step) + if err := tfInit(clusterDir, templateDir); err != nil { return err } - err = tfApply(buildPath, step, codePath) - if err != nil { - return err - } - return nil + return tfApply(clusterDir, step, templateDir) } diff --git a/installer/pkg/workflow/terraform.go b/installer/pkg/workflow/terraform.go index 480ed46830..b6aaf7dd08 100644 --- a/installer/pkg/workflow/terraform.go +++ b/installer/pkg/workflow/terraform.go @@ -1,31 +1,29 @@ package workflow import ( + "fmt" "os" "os/exec" ) -func runTfCommand(buildPath string, args ...string) error { - tfCommand := exec.Command("terraform", args...) - tfCommand.Dir = buildPath - tfCommand.Stdin = os.Stdin - tfCommand.Stdout = os.Stdout - tfCommand.Stderr = os.Stderr - err := tfCommand.Run() - if err != nil { - return err - } - return nil +func terraformExec(clusterDir string, args ...string) error { + tf := exec.Command("terraform", args...) + tf.Dir = clusterDir + tf.Stdin = os.Stdin + tf.Stdout = os.Stdout + tf.Stderr = os.Stderr + + return tf.Run() } -func tfInit(buildPath string, codePath string) error { - return runTfCommand(buildPath, "init", codePath) +func tfApply(clusterDir, state, templateDir string) error { + return terraformExec(clusterDir, "apply", "-auto-approve", fmt.Sprintf("-state=%s.tfstate", state), templateDir) } -func tfDestroy(buildPath string, state string, codePath string) error { - return runTfCommand(buildPath, "destroy", "-force", "-state="+state+".tfstate", codePath) +func tfDestroy(clusterDir, state, templateDir string) error { + return terraformExec(clusterDir, "destroy", "-force", fmt.Sprintf("-state=%s.tfstate", state), templateDir) } -func tfApply(buildPath string, state string, codePath string) error { - return runTfCommand(buildPath, "apply", "-state="+state+".tfstate", codePath) +func tfInit(clusterDir, templateDir string) error { + return terraformExec(clusterDir, "init", templateDir) } diff --git a/installer/pkg/workflow/utils.go b/installer/pkg/workflow/utils.go index 11a99ea3e2..7831339e54 100644 --- a/installer/pkg/workflow/utils.go +++ b/installer/pkg/workflow/utils.go @@ -1,18 +1,127 @@ package workflow import ( + "bufio" "errors" + "fmt" + "io" "log" + "os" + "path/filepath" "time" - "github.com/coreos/tectonic-installer/installer/pkg/tectonic" + "github.com/coreos/tectonic-installer/installer/pkg/config" + "github.com/coreos/tectonic-installer/installer/pkg/config-generator" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" ) -func waitForNcg(m *metadata) error { - kubeconfigPath := m.statePath + kubeConfig - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) +const ( + assetsStep = "assets" + bootstrapStep = "bootstrap" + configFileName = "config.yaml" + joinStep = "joining" + kubeConfigPath = "generated/auth/kubeconfig" +) + +func copyFile(fromFilePath, toFilePath 
string) error { + from, err := os.Open(fromFilePath) + if err != nil { + return err + } + defer from.Close() + + to, err := os.OpenFile(toFilePath, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return err + } + defer to.Close() + + _, err = io.Copy(to, from) + return err +} + +func destroyCNAME(clusterDir string) error { + return terraformExec(clusterDir, "destroy", "-force", fmt.Sprintf("-state=%s.tfstate", bootstrapStep), "-target=aws_route53_record.tectonic_ncg", findTemplatesForStep(bootstrapStep)) +} + +// TODO: Handle errors for Getwd +func findTemplatesForStep(step string) string { + dir, _ := os.Getwd() + return filepath.Join(dir, "steps", step) +} + +func generateClusterConfigStep(m *metadata) error { + configGenerator := configgenerator.New(m.cluster) + + kubeSystem, err := configGenerator.KubeSystem() + if err != nil { + return err + } + + kubePath := filepath.Join(m.clusterDir, kubeSystemPath) + if err := os.MkdirAll(kubePath, os.ModeDir|0755); err != nil { + return fmt.Errorf("Failed to create manifests directory at %s", kubePath) + } + + kubeSystemConfigFilePath := filepath.Join(kubePath, kubeSystemFileName) + if err := writeFile(kubeSystemConfigFilePath, kubeSystem); err != nil { + return err + } + + tectonicSystem, err := configGenerator.TectonicSystem() + if err != nil { + return err + } + + tectonicPath := filepath.Join(m.clusterDir, tectonicSystemPath) + if err := os.MkdirAll(tectonicPath, os.ModeDir|0755); err != nil { + return fmt.Errorf("Failed to create tectonic directory at %s", tectonicPath) + } + + tectonicSystemConfigFilePath := filepath.Join(tectonicPath, tectonicSystemFileName) + return writeFile(tectonicSystemConfigFilePath, tectonicSystem) +} + +func importAutoScalingGroup(m *metadata) error { + if err := terraformExec(m.clusterDir, "import", fmt.Sprintf("-state=%s.tfstate", joinStep), fmt.Sprintf("-config=%s", findTemplatesForStep(joinStep)), "aws_autoscaling_group.masters", fmt.Sprintf("%s-masters", m.cluster.Name)); err != nil { + return err + } + + return terraformExec(m.clusterDir, "import", fmt.Sprintf("-state=%s.tfstate", joinStep), fmt.Sprintf("-config=%s", findTemplatesForStep(joinStep)), "aws_autoscaling_group.workers", fmt.Sprintf("%s-workers", m.cluster.Name)) + +} + +func readClusterConfig(configFilePath string) (*config.Cluster, error) { + config, err := config.ParseFile(configFilePath) + if err != nil { + return nil, fmt.Errorf("%s is not a valid config file: %s", configFilePath, err) + } + + return &config.Clusters[0], nil +} + +func readClusterConfigStep(m *metadata) error { + var configFilePath string + + if m.configFilePath != "" { + configFilePath = m.configFilePath + } else { + configFilePath = filepath.Join(m.clusterDir, configFileName) + } + + cluster, err := readClusterConfig(configFilePath) + if err != nil { + return err + } + + m.cluster = *cluster + + return nil +} + +func waitForNCG(m *metadata) error { + config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(m.clusterDir, kubeConfigPath)) if err != nil { return err } @@ -34,24 +143,22 @@ func waitForNcg(m *metadata) error { time.Sleep(time.Second * time.Duration(wait)) retries-- } - return errors.New("NCG is not running") -} -func destroyCname(m *metadata) error { - return runTfCommand(m.statePath, "destroy", "-force", "-state=bootstrap.tfstate", "-target=aws_route53_record.tectonic_ncg", tectonic.FindTemplatesForStep("bootstrap")) + return errors.New("NCG is not running") } -func importAutoScalingGroup(m *metadata) error { - bp := m.statePath - var err error - err = 
runTfCommand(bp, "import", "-state=joining.tfstate", "-config="+tectonic.FindTemplatesForStep("joining"), "aws_autoscaling_group.masters", m.clusterName+"-masters") +func writeFile(path, content string) error { + f, err := os.Create(path) if err != nil { return err } - err = runTfCommand(bp, "import", "-state=joining.tfstate", "-config="+tectonic.FindTemplatesForStep("joining"), "aws_autoscaling_group.workers", m.clusterName+"-workers") - if err != nil { + defer f.Close() + + w := bufio.NewWriter(f) + if _, err := fmt.Fprintln(w, content); err != nil { return err } - return nil + w.Flush() + return nil } diff --git a/installer/pkg/workflow/workflow.go b/installer/pkg/workflow/workflow.go index 4559aa17a9..849b1d92c5 100644 --- a/installer/pkg/workflow/workflow.go +++ b/installer/pkg/workflow/workflow.go @@ -1,10 +1,6 @@ package workflow -// Workflow is a high-level representation -// of a set of actions performed in a predictable order. -type Workflow interface { - Execute() error -} +import "github.com/coreos/tectonic-installer/installer/pkg/config" // metadata is the state store of the current workflow execution. // It is meant to carry state for one step to another. @@ -13,11 +9,9 @@ type Workflow interface { // Steps taked thier inputs from the metadata object and persist // results onto it for later consumption. type metadata struct { - // TODO: use config and cluster structs - clusterName string - configFile string - statePath string - platform string + cluster config.Cluster + configFilePath string + clusterDir string } // Step is the entrypoint of a workflow step implementation. @@ -25,18 +19,20 @@ type metadata struct { // Next, add a refrence to this new function in a Workflow's steps list. type Step func(*metadata) error -type simpleWorkflow struct { +// Workflow is a high-level representation +// of a set of actions performed in a predictable order. +type Workflow struct { metadata metadata steps []Step } -func (w simpleWorkflow) Execute() error { - var err error +// Execute runs all steps in order. +func (w Workflow) Execute() error { for _, step := range w.steps { - err = step(&w.metadata) - if err != nil { + if err := step(&w.metadata); err != nil { return err } } + return nil } diff --git a/installer/pkg/workflow/workflow_test.go b/installer/pkg/workflow/workflow_test.go index 8a18ca4c27..2cfac0a224 100644 --- a/installer/pkg/workflow/workflow_test.go +++ b/installer/pkg/workflow/workflow_test.go @@ -41,7 +41,7 @@ func TestWorkflowTypeExecute(t *testing.T) { } for _, tc := range testCases { - wf := simpleWorkflow{ + wf := Workflow{ metadata: tc.m, steps: tc.steps, }
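
Note (illustrative, not part of the patch): the refactor above replaces the old `Workflow` interface and unexported `simpleWorkflow` struct with a single exported `Workflow` struct whose `Execute` method runs each `Step` against a shared `metadata` value. Below is a minimal sketch of composing a custom workflow under that pattern, assuming the `Workflow`, `metadata`, and `Step` types plus `readClusterConfigStep` from `installer/pkg/workflow` as shown in the diff; the constructor and inline step here are hypothetical examples, not code from the PR.

```go
package workflow

import "fmt"

// newPrintClusterWorkflow is a hypothetical example workflow: it reuses
// readClusterConfigStep from this package and adds an inline Step that
// reads the metadata populated by the previous step.
func newPrintClusterWorkflow(clusterDir string) Workflow {
	return Workflow{
		metadata: metadata{clusterDir: clusterDir},
		steps: []Step{
			readClusterConfigStep,
			func(m *metadata) error {
				// Each Step receives the shared metadata; earlier steps
				// (here readClusterConfigStep) populate fields for later ones.
				fmt.Printf("cluster %q, directory %s\n", m.cluster.Name, m.clusterDir)
				return nil
			},
		},
	}
}
```

Because `Execute` simply iterates the `steps` slice, the CLI subcommands in `main.go` differ only in which constructor they call: `tectonic init --config=…` wires up `NewInitWorkflow`, the `install` subcommands wire up the corresponding `NewInstall*Workflow`, and `tectonic destroy <dir>` wires up `NewDestroyWorkflow`.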