diff --git a/BUILD.bazel b/BUILD.bazel index 6494961efd2..a7164142de6 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -76,7 +76,7 @@ pkg_tar( name = "tectonic-%s" % TECTONIC_VERSION, srcs = [ "//:template_resources", - "//examples:tectonic_cli_examples", + "//examples:config_examples", ], extension = "tar.gz", mode = "0666", diff --git a/Documentation/dev/libvirt-howto.md b/Documentation/dev/libvirt-howto.md index 0097724d099..d601e4af462 100644 --- a/Documentation/dev/libvirt-howto.md +++ b/Documentation/dev/libvirt-howto.md @@ -84,13 +84,13 @@ iptables -I INPUT -p tcp -s 192.168.124.0/24 -d 192.168.124.1 --dport 16509 \ ``` #### 1.7 Prepare the configuration file -1. `cp examples/tectonic.libvirt.yaml ./` +1. `cp examples/libvirt.yaml ./` 1. Edit the configuration file: 1. Set an email and password in the `admin` section - 1. Set a `baseDomain` (to `tt.testing`) 1. Set the `sshKey` in the `admin` section to the **contents** of an ssh key (e.g. `ssh-rsa AAAA...`) - 1. Set the `imagePath` to the **absolute** path of the operating system image you downloaded - 1. Set the `name` (e.g. test1) + 1. Set a `baseDomain` (to `tt.testing`) + 1. Set the `imagePath` entries to the **absolute** path of the operating system image you downloaded + 1. Set the `name` (e.g. `test1`) 1. Look at the `podCIDR` and `serviceCIDR` fields in the `networking` section. Make sure they don't conflict with anything important. 1. Set the `pullSecret` to your JSON pull secret. @@ -132,7 +132,7 @@ alias tectonic="${PWD}/tectonic-dev/installer/tectonic" Initialize (the environment variables are a convenience): ```sh -tectonic init --config=../tectonic.libvirt.yaml +tectonic init --config=../libvirt.yaml export CLUSTER_NAME= export BASE_DOMAIN= ``` diff --git a/README.md b/README.md index 69a8c89a6a3..573af119998 100644 --- a/README.md +++ b/README.md @@ -35,13 +35,13 @@ These instructions can be used for AWS: 5. 
Edit Tectonic configuration file including the $CLUSTER_NAME ```sh - $EDITOR examples/tectonic.aws.yaml + $EDITOR examples/aws.yaml ``` 6. Prepare a local configuration. The structure behind the YAML input is described [here][godoc-InstallConfig]. ```sh - tectonic init --config=examples/tectonic.aws.yaml + tectonic init --config=examples/aws.yaml ``` 7. Install Tectonic cluster @@ -51,7 +51,7 @@ These instructions can be used for AWS: 8. Visit `https://{$CLUSTER_NAME}-api.${BASE_DOMAIN}:6443/console/`. You may need to ignore a certificate warning if you did not configure a CA known to your browser. - Log in with the admin credentials you configured in `tectonic.aws.yaml`. + Log in with the admin credentials you configured in `aws.yaml`. 9. Teardown Tectonic cluster ```sh diff --git a/examples/BUILD.bazel b/examples/BUILD.bazel index 33b91ac6a78..37baf66af6a 100644 --- a/examples/BUILD.bazel +++ b/examples/BUILD.bazel @@ -1,9 +1,9 @@ -example_cli_configs = glob(["tectonic.*.yaml"]) +config_examples = glob(["*.yaml"]) filegroup( - name = "tectonic_cli_examples", - srcs = example_cli_configs, + name = "config_examples", + srcs = config_examples, visibility = ["//visibility:public"], ) -exports_files(example_cli_configs) +exports_files(config_examples) diff --git a/examples/aws.yaml b/examples/aws.yaml new file mode 100644 index 00000000000..19bffa93c8a --- /dev/null +++ b/examples/aws.yaml @@ -0,0 +1,83 @@ +metadata: + + # The name of the cluster. + # If used in a cloud-environment, this will be prepended to `baseDomain` to construct a domain for the OpenShift console. + # + # Note: This field MUST be set manually prior to creating the cluster. + # Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. + name: + +# (optional) A cluster ID for this cluster. +# This defaults to a random ID. +clusterID: + +admin: + email: a@b.c + password: verysecure + sshKey: "ssh-ed25519 AAAA..." 
+ +# The base DNS domain of the cluster. It must NOT contain a trailing period. Some +# DNS providers will automatically add this if necessary. +# +# Example: `openshift.example.com`. +# +# Note: This field MUST be set manually prior to creating the cluster. +baseDomain: + +networking: + # This declares the IP range to assign Kubernetes pod IPs in CIDR notation. + podCIDR: 10.2.0.0/16 + + # This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. + # The maximum size of this IP range is /12 + serviceCIDR: 10.3.0.0/16 + + # (optional) Configures the network to be used in the cluster. One of the following values can be used: + # + # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. + # + # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. + # + # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. + # + # - "none": disables the installation of any Pod level networking layer. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. + # type: flannel + +machines: + - name: master + replicas: 1 + + - name: worker + replicas: 3 + +platform: + aws: + # The target AWS region for the cluster. + region: us-east-1 + + # (optional) Additional tags for AWS resources created for the cluster. + # + # Example: {"key": "value", "foo": "bar"} + # userTags: + + # (optional) ID of an existing VPC to launch nodes into. + # If unset a new VPC is created. + # + # Example: `vpc-123456` + # vpcID: + + # Block of IP addresses used by the VPC. + # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect. + vpcCIDRBlock: 10.0.0.0/16 + +# The pull secret in JSON format. 
+# This is known to be a "Docker pull secret" as produced by the docker login [1] command. +# A sample JSON content is shown in [2]. +# You can download the pull secret from your Account overview page at [3]. +# +# [1] https://docs.docker.com/engine/reference/commandline/login/ +# +# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup +# +# [3] https://account.coreos.com/overview +pullSecret: '{"auths": {}}' diff --git a/examples/libvirt.yaml b/examples/libvirt.yaml new file mode 100644 index 00000000000..2acd9a288b7 --- /dev/null +++ b/examples/libvirt.yaml @@ -0,0 +1,81 @@ +metadata: + + # The name of the cluster. + # If used in a cloud-environment, this will be prepended to `baseDomain` to construct a domain for the OpenShift console. + # + # Note: This field MUST be set manually prior to creating the cluster. + # Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. + name: + +# (optional) A cluster ID for this cluster. +# This defaults to a random ID. +clusterID: + +admin: + email: a@b.c + password: verysecure + sshKey: "ssh-ed25519 AAAA..." + +# The base DNS domain of the cluster. It must NOT contain a trailing period. Some +# DNS providers will automatically add this if necessary. +# +# Example: `openshift.example.com`. +# +# Note: This field MUST be set manually prior to creating the cluster. +baseDomain: + +networking: + # This declares the IP range to assign Kubernetes pod IPs in CIDR notation. + podCIDR: 10.2.0.0/16 + + # This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. + # The maximum size of this IP range is /12 + serviceCIDR: 10.3.0.0/16 + + # (optional) Configures the network to be used in the cluster. One of the following values can be used: + # + # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. 
+ # + # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. + # + # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. + # + # - "none": disables the installation of any Pod level networking layer. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. + # type: flannel + +machines: + - name: master + replicas: 1 + platform: + libvirt: + qcowImagePath: /path/to/image + + - name: worker + replicas: 2 + platform: + libvirt: + qcowImagePath: /path/to/image + +platform: + libvirt: + # You must specify an IP address here that libvirtd is listening on, + # and that the cluster-api controller pod will be able to connect + # to. Often 192.168.122.1 is the default for the virbr0 interface. + uri: qemu+tcp://192.168.122.1/system + + network: + name: openshift + ifName: tt0 + ipRange: 192.168.124.0/24 + +# The pull secret in JSON format. +# This is known to be a "Docker pull secret" as produced by the docker login [1] command. +# A sample JSON content is shown in [2]. +# You can download the pull secret from your Account overview page at [3]. +# +# [1] https://docs.docker.com/engine/reference/commandline/login/ +# +# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup +# +# [3] https://account.coreos.com/overview +pullSecret: '{"auths": {}}' diff --git a/examples/tectonic.aws.yaml b/examples/tectonic.aws.yaml deleted file mode 100644 index c2a5921e073..00000000000 --- a/examples/tectonic.aws.yaml +++ /dev/null @@ -1,229 +0,0 @@ -admin: - email: "a@b.c" - password: "verysecure" - sshKey: "ssh-ed25519 AAAA..." -aws: - # (optional) AMI override for all nodes. Example: `ami-foobar123`. 
- # ec2AMIOverride: - - external: - # (optional) List of subnet IDs within an existing VPC to deploy master nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # masterSubnetIDs: - - # (optional) If set, the given Route53 zone ID will be used as the internal (private) zone. - # This zone will be used to create etcd DNS records as well as internal API and internal Ingress records. - # If set, no additional private zone will be created. - # - # Example: `"Z1ILINNUJGTAO1"` - # privateZone: - - # (optional) ID of an existing VPC to launch nodes into. - # If unset a new VPC is created. - # - # Example: `vpc-123456` - # vpcID: - - # (optional) List of subnet IDs within an existing VPC to deploy worker nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # workerSubnetIDs: - - # (optional) Extra AWS tags to be applied to created resources. - # - # Example: `{ "key" = "value", "foo" = "bar" }` - # extraTags: - - # (optional) Name of IAM role to use to access AWS in order to deploy the Tectonic Cluster. - # The name is also the full role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # installerRole: - - master: - # (optional) This configures master availability zones and their corresponding subnet CIDRs directly. - # - # Example: - # `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }` - # customSubnets: - - # Instance size for the master node(s). Example: `t2.medium`. - ec2Type: t2.medium - - # (optional) List of additional security group IDs for master nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of master nodes. - # The name is also the last part of a role's ARN. 
- # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - rootVolume: - # The amount of provisioned IOPS for the root block device of master nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of master nodes. - size: 30 - - # The type of volume for the root block device of master nodes. - type: gp2 - - # (optional) If set to true, create private-facing ingress resources (ELB, A-records). - # If set to false, no private-facing ingress resources will be provisioned and all DNS records will be created in the public Route53 zone. - # privateEndpoints: true - - # (optional) This declares the AWS credentials profile to use. - # profile: default - - # (optional) If set to true, create public-facing ingress resources (ELB, A-records). - # If set to false, no public-facing ingress resources will be created. - # publicEndpoints: true - - # The target AWS region for the cluster. - region: us-east-1 - - # Block of IP addresses used by the VPC. - # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect. - vpcCIDRBlock: 10.0.0.0/16 - - worker: - # (optional) This configures worker availability zones and their corresponding subnet CIDRs directly. - # - # Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }` - # customSubnets: - - # Instance size for the worker node(s). Example: `t2.medium`. - ec2Type: t2.medium - - # (optional) List of additional security group IDs for worker nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of worker nodes. - # The name is also the last part of a role's ARN. 
- # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - # (optional) List of ELBs to attach all worker instances to. - # This is useful for exposing NodePort services via load-balancers managed separately from the cluster. - # - # Example: - # * `["ingress-nginx"]` - # loadBalancers: - - rootVolume: - # The amount of provisioned IOPS for the root block device of worker nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of worker nodes. - size: 30 - - # The type of volume for the root block device of worker nodes. - type: gp2 - -# The base DNS domain of the cluster. It must NOT contain a trailing period. Some -# DNS providers will automatically add this if necessary. -# -# Example: `openshift.example.com`. -# -# Note: This field MUST be set manually prior to creating the cluster. -# This applies only to cloud platforms. -baseDomain: - -ca: - # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. - # If left blank, a CA certificate will be automatically generated. - # cert: - - # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. - # This field is mandatory if `ca_cert` is set. - # key: - - # (optional) The algorithm used to generate ca_key. - # The default value is currently recommended. - # This field is mandatory if `ca_cert` is set. - # keyAlg: RSA - -iscsi: - # (optional) Start iscsid.service to enable iscsi volume attachment. - # enabled: false - -master: - # The name of the node pool(s) to use for master nodes - nodePools: - - master - -# The name of the cluster. -# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console. -# -# Note: This field MUST be set manually prior to creating the cluster. 
-# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. -name: - -networking: - # (optional) This declares the MTU used by Calico. - # mtu: - - # This declares the IP range to assign Kubernetes pod IPs in CIDR notation. - podCIDR: 10.2.0.0/16 - - # This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. - # The maximum size of this IP range is /12 - serviceCIDR: 10.3.0.0/16 - - # (optional) Configures the network to be used in Tectonic. One of the following values can be used: - # - # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. - # - # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. - # - # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. - # - # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. - # type: flannel - -nodePools: - # The number of master nodes to be created. - # This applies only to cloud platforms. - - count: 1 - name: master - - # The number of worker nodes to be created. - # This applies only to cloud platforms. - - count: 3 - name: worker - -# The platform used for deploying. -platform: aws - -# The pull secret in JSON format. -# This is known to be a "Docker pull secret" as produced by the docker login [1] command. -# A sample JSON content is shown in [2]. -# You can download the pull secret from your Account overview page at [3]. 
-# -# [1] https://docs.docker.com/engine/reference/commandline/login/ -# -# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup -# -# [3] https://account.coreos.com/overview -pullSecret: '{"auths": {}}' - -worker: - # The name of the node pool(s) to use for workers - nodePools: - - worker diff --git a/examples/tectonic.libvirt.yaml b/examples/tectonic.libvirt.yaml deleted file mode 100644 index 33a06c32659..00000000000 --- a/examples/tectonic.libvirt.yaml +++ /dev/null @@ -1,103 +0,0 @@ -admin: - email: a@b.c - password: verysecure - sshKey: "ssh-ed25519 AAAA..." -# The base DNS domain of the cluster. It must NOT contain a trailing period. Some -# DNS providers will automatically add this if necessary. -# -# Example: `openshift.example.com`. -# -# Note: This field MUST be set manually prior to creating the cluster. -baseDomain: - -libvirt: - # You must specify an IP address here that libvirtd is listening on, - # and that the cluster-api controller pod will be able to connect - # to. Often 192.168.122.1 is the default for the virbr0 interface. - uri: qemu+tcp://192.168.122.1/system - network: - name: tectonic - ifName: tt0 - ipRange: 192.168.124.0/24 - imagePath: /path/to/image - -ca: - # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. - # If left blank, a CA certificate will be automatically generated. - # cert: - - # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. - # This field is mandatory if `ca_cert` is set. - # key: - - # (optional) The algorithm used to generate ca_key. - # The default value is currently recommended. - # This field is mandatory if `ca_cert` is set. - # keyAlg: RSA - -iscsi: - # (optional) Start iscsid.service to enable iscsi volume attachment. - # enabled: false - -master: - nodePools: - - master - -# The name of the cluster. 
-# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console. -# -# Note: This field MUST be set manually prior to creating the cluster. -# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. -name: - -networking: - # (optional) This declares the MTU used by Calico. - # mtu: - - # (optional) This declares the IP range to assign Kubernetes pod IPs in CIDR notation. - podCIDR: 10.2.0.0/16 - - # (optional) This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. - # The maximum size of this IP range is /12 - serviceCIDR: 10.3.0.0/16 - - # (optional) Configures the network to be used in Tectonic. One of the following values can be used: - # - # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. - # - # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. - # - # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. - # - # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. - # type: flannel - -nodePools: - # The number of master nodes to be created. - # This applies only to cloud platforms. - - count: 1 - name: master - - # The number of worker nodes to be created. - # This applies only to cloud platforms. - - count: 2 - name: worker - -# The platform used for deploying. -platform: libvirt - -# The pull secret in JSON format. -# This is known to be a "Docker pull secret" as produced by the docker login [1] command. -# A sample JSON content is shown in [2]. 
-# You can download the pull secret from your Account overview page at [3]. -# -# [1] https://docs.docker.com/engine/reference/commandline/login/ -# -# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup -# -# [3] https://account.coreos.com/overview -pullSecret: '{"auths": {}}' - -worker: - nodePools: - - worker diff --git a/installer/pkg/config-generator/fixtures/kube-system.yaml b/installer/pkg/config-generator/fixtures/kube-system.yaml index 0c2934533c6..1ac4f4ef71c 100644 --- a/installer/pkg/config-generator/fixtures/kube-system.yaml +++ b/installer/pkg/config-generator/fixtures/kube-system.yaml @@ -10,7 +10,6 @@ data: - name: master platform: aws: - iamRoleName: "" rootVolume: iops: 100 size: 30 @@ -20,7 +19,6 @@ data: - name: worker platform: aws: - iamRoleName: "" rootVolume: iops: 100 size: 30 @@ -38,7 +36,6 @@ data: aws: region: us-east-1 vpcCIDRBlock: 10.0.0.0/16 - vpcID: "" pullSecret: '{"auths": {}}' kco-config: | apiVersion: v1 diff --git a/installer/pkg/config/BUILD.bazel b/installer/pkg/config/BUILD.bazel index 604d51e86fa..0aaebdc1028 100644 --- a/installer/pkg/config/BUILD.bazel +++ b/installer/pkg/config/BUILD.bazel @@ -15,6 +15,7 @@ go_library( "//installer/pkg/config/libvirt:go_default_library", "//installer/pkg/validate:go_default_library", "//pkg/rhcos:go_default_library", + "//pkg/types:go_default_library", "//vendor/github.com/Sirupsen/logrus:go_default_library", "//vendor/github.com/coreos/ignition/config/v2_2:go_default_library", "//vendor/github.com/coreos/tectonic-config/config/tectonic-network:go_default_library", @@ -25,11 +26,16 @@ go_library( go_test( name = "go_default_test", size = "small", - srcs = ["validate_test.go"], + srcs = [ + "parser_test.go", + "validate_test.go", + ], data = glob(["fixtures/**"]), embed = [":go_default_library"], deps = [ "//installer/pkg/config/aws:go_default_library", "//installer/pkg/config/libvirt:go_default_library", + 
"//vendor/github.com/coreos/tectonic-config/config/tectonic-network:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/installer/pkg/config/parser.go b/installer/pkg/config/parser.go index 0114babe315..942cc4d5c27 100644 --- a/installer/pkg/config/parser.go +++ b/installer/pkg/config/parser.go @@ -5,30 +5,24 @@ import ( "fmt" "io/ioutil" - "gopkg.in/yaml.v2" - + "github.com/coreos/tectonic-config/config/tectonic-network" + "github.com/openshift/installer/installer/pkg/config/aws" + "github.com/openshift/installer/installer/pkg/config/libvirt" "github.com/openshift/installer/pkg/rhcos" + "github.com/openshift/installer/pkg/types" + "gopkg.in/yaml.v2" ) // ParseConfig parses a yaml string and returns, if successful, a Cluster. func ParseConfig(data []byte) (*Cluster, error) { cluster := defaultCluster - if err := yaml.Unmarshal(data, &cluster); err != nil { - return nil, err - } - - // Deprecated: remove after openshift/release is ported to pullSecret - if cluster.PullSecretPath != "" { - if cluster.PullSecret != "" { - return nil, errors.New("pullSecretPath is deprecated; just set pullSecret") - } - - data, err := ioutil.ReadFile(cluster.PullSecretPath) - if err != nil { + err := parseInstallConfig(data, &cluster) + if err != nil { + err2 := parseLegacyConfig(data, &cluster) + if err2 != nil { return nil, err } - cluster.PullSecret = string(data) } if cluster.EC2AMIOverride == "" { @@ -42,6 +36,127 @@ func ParseConfig(data []byte) (*Cluster, error) { return &cluster, nil } +func parseInstallConfig(data []byte, cluster *Cluster) (err error) { + installConfig := &types.InstallConfig{} + err = yaml.Unmarshal(data, &installConfig) + if err != nil { + return err + } + + cluster.Name = installConfig.Name + cluster.Internal.ClusterID = installConfig.ClusterID + cluster.Admin = Admin{ + Email: installConfig.Admin.Email, + Password: installConfig.Admin.Password, + SSHKey: installConfig.Admin.SSHKey, + } + 
cluster.BaseDomain = installConfig.BaseDomain + cluster.Networking = Networking{ + Type: tectonicnetwork.NetworkType(installConfig.Networking.Type), + ServiceCIDR: installConfig.Networking.ServiceCIDR.String(), + PodCIDR: installConfig.Networking.PodCIDR.String(), + } + + for _, machinePool := range installConfig.Machines { + nodePool := NodePool{ + Name: machinePool.Name, + } + if machinePool.Replicas == nil { + nodePool.Count = 1 + } else { + nodePool.Count = int(*machinePool.Replicas) + } + cluster.NodePools = append(cluster.NodePools, nodePool) + switch machinePool.Name { + case "master": + if machinePool.Platform.AWS != nil { + cluster.AWS.Master = aws.Master{ + EC2Type: machinePool.Platform.AWS.InstanceType, + IAMRoleName: machinePool.Platform.AWS.IAMRoleName, + MasterRootVolume: aws.MasterRootVolume{ + IOPS: machinePool.Platform.AWS.EC2RootVolume.IOPS, + Size: machinePool.Platform.AWS.EC2RootVolume.Size, + Type: machinePool.Platform.AWS.EC2RootVolume.Type, + }, + } + } + case "worker": + if machinePool.Platform.AWS != nil { + cluster.AWS.Worker = aws.Worker{ + EC2Type: machinePool.Platform.AWS.InstanceType, + IAMRoleName: machinePool.Platform.AWS.IAMRoleName, + WorkerRootVolume: aws.WorkerRootVolume{ + IOPS: machinePool.Platform.AWS.EC2RootVolume.IOPS, + Size: machinePool.Platform.AWS.EC2RootVolume.Size, + Type: machinePool.Platform.AWS.EC2RootVolume.Type, + }, + } + } + default: + return fmt.Errorf("unrecognized machine pool %q", machinePool.Name) + } + + if machinePool.Platform.Libvirt != nil { + if cluster.Libvirt.QCOWImagePath != "" && cluster.Libvirt.QCOWImagePath != machinePool.Platform.Libvirt.QCOWImagePath { + return fmt.Errorf("per-machine-pool images are not yet supported") + } + cluster.Libvirt.QCOWImagePath = machinePool.Platform.Libvirt.QCOWImagePath + } + } + + if installConfig.Platform.AWS != nil { + cluster.AWS = aws.AWS{ + Region: installConfig.Platform.AWS.Region, + ExtraTags: installConfig.Platform.AWS.UserTags, + External: aws.External{ + 
VPCID: installConfig.Platform.AWS.VPCID, + }, + VPCCIDRBlock: installConfig.Platform.AWS.VPCCIDRBlock, + } + } + + if installConfig.Platform.Libvirt != nil { + masterIPs := make([]string, len(installConfig.Platform.Libvirt.MasterIPs)) + for i, ip := range installConfig.Platform.Libvirt.MasterIPs { + masterIPs[i] = ip.String() + } + cluster.Libvirt = libvirt.Libvirt{ + URI: installConfig.Platform.Libvirt.URI, + Network: libvirt.Network{ + Name: installConfig.Platform.Libvirt.Network.Name, + IfName: installConfig.Platform.Libvirt.Network.IfName, + IPRange: installConfig.Platform.Libvirt.Network.IPRange, + }, + MasterIPs: masterIPs, + } + } + + cluster.PullSecret = installConfig.PullSecret + + return nil +} + +func parseLegacyConfig(data []byte, cluster *Cluster) (err error) { + if err := yaml.Unmarshal(data, cluster); err != nil { + return err + } + + // Deprecated: remove after openshift/release is ported to pullSecret + if cluster.PullSecretPath != "" { + if cluster.PullSecret != "" { + return errors.New("pullSecretPath is deprecated; just set pullSecret") + } + + data, err := ioutil.ReadFile(cluster.PullSecretPath) + if err != nil { + return err + } + cluster.PullSecret = string(data) + } + + return nil +} + // ParseConfigFile parses a yaml file and returns, if successful, a Cluster. 
func ParseConfigFile(path string) (*Cluster, error) { data, err := ioutil.ReadFile(path) diff --git a/installer/pkg/config/parser_test.go b/installer/pkg/config/parser_test.go new file mode 100644 index 00000000000..3b19e604e2d --- /dev/null +++ b/installer/pkg/config/parser_test.go @@ -0,0 +1,89 @@ +package config + +import ( + "testing" + + "github.com/coreos/tectonic-config/config/tectonic-network" + "github.com/openshift/installer/installer/pkg/config/aws" + "github.com/openshift/installer/installer/pkg/config/libvirt" + "github.com/stretchr/testify/assert" +) + +func TestParseInstallConfig(t *testing.T) { + data := []byte(`admin: + email: test-email + password: test-password + sshKey: test-sshkey +baseDomain: test-domain +clusterID: test-cluster-id +machines: + - name: master + replicas: 3 + - name: worker + replicas: 2 + platform: + aws: + type: m4-large +metadata: + name: test-cluster-name +networking: + podCIDR: 10.2.0.0/16 + serviceCIDR: 10.3.0.0/16 + type: flannel +pullSecret: '{"auths": {}}' +`) + + actual, err := ParseConfig(data) + if err != nil { + t.Fatal(err) + } + actual.EC2AMIOverride = "" + + expected := &Cluster{ + Name: "test-cluster-name", + Admin: Admin{ + Email: "test-email", + Password: "test-password", + SSHKey: "test-sshkey", + }, + BaseDomain: "test-domain", + CA: CA{ + RootCAKeyAlg: "RSA", + }, + Internal: Internal{ + ClusterID: "test-cluster-id", + }, + Networking: Networking{ + Type: tectonicnetwork.NetworkFlannel, + PodCIDR: "10.2.0.0/16", + ServiceCIDR: "10.3.0.0/16", + }, + NodePools: []NodePool{ + { + Name: "master", + Count: 3, + }, + { + Name: "worker", + Count: 2, + }, + }, + AWS: aws.AWS{ + Endpoints: aws.EndpointsAll, + Region: aws.DefaultRegion, + Profile: aws.DefaultProfile, + VPCCIDRBlock: "10.0.0.0/16", + Worker: aws.Worker{ + EC2Type: "m4-large", + }, + }, + Libvirt: libvirt.Libvirt{ + Network: libvirt.Network{ + IfName: libvirt.DefaultIfName, + }, + }, + PullSecret: "{\"auths\": {}}", + } + + assert.Equal(t, expected, 
actual) +} diff --git a/pkg/asset/installconfig/installconfig_test.go b/pkg/asset/installconfig/installconfig_test.go index e45607f410a..85a3a058c0f 100644 --- a/pkg/asset/installconfig/installconfig_test.go +++ b/pkg/asset/installconfig/installconfig_test.go @@ -74,9 +74,7 @@ func TestInstallConfigGenerate(t *testing.T) { "test-region", }, expectedPlatformYaml: ` aws: - region: test-region - vpcCIDRBlock: "" - vpcID: ""`, + region: test-region`, }, { name: "libvirt", @@ -86,11 +84,7 @@ func TestInstallConfigGenerate(t *testing.T) { }, expectedPlatformYaml: ` libvirt: URI: test-uri - masterIPs: null - network: - if: "" - ipRange: "" - name: ""`, + network: {}`, }, } for _, tc := range cases { @@ -174,7 +168,6 @@ metadata: networking: podCIDR: 10.2.0.0/16 serviceCIDR: 10.3.0.0/16 - type: "" platform: %s pullSecret: test-pull-secret diff --git a/pkg/ipnet/BUILD.bazel b/pkg/ipnet/BUILD.bazel index 8b0557e8054..b4105afc4b4 100644 --- a/pkg/ipnet/BUILD.bazel +++ b/pkg/ipnet/BUILD.bazel @@ -5,10 +5,12 @@ go_library( srcs = ["ipnet.go"], importpath = "github.com/openshift/installer/pkg/ipnet", visibility = ["//visibility:public"], + deps = ["//vendor/gopkg.in/yaml.v2:go_default_library"], ) go_test( name = "go_default_test", srcs = ["ipnet_test.go"], embed = [":go_default_library"], + deps = ["//vendor/gopkg.in/yaml.v2:go_default_library"], ) diff --git a/pkg/ipnet/ipnet.go b/pkg/ipnet/ipnet.go index 48ca01e6706..3da0096a541 100644 --- a/pkg/ipnet/ipnet.go +++ b/pkg/ipnet/ipnet.go @@ -3,8 +3,11 @@ package ipnet import ( "encoding/json" + "fmt" "net" "reflect" + + yaml "gopkg.in/yaml.v2" ) var nullString = "null" @@ -16,7 +19,7 @@ type IPNet struct { net.IPNet } -// MarshalJSON interface for an IPNet +// MarshalJSON interface for an IPNet. 
func (ipnet IPNet) MarshalJSON() (data []byte, err error) { if reflect.DeepEqual(ipnet.IPNet, emptyIPNet) { return nullBytes, nil @@ -25,7 +28,7 @@ func (ipnet IPNet) MarshalJSON() (data []byte, err error) { return json.Marshal(ipnet.String()) } -// UnmarshalJSON interface for an IPNet +// UnmarshalJSON interface for an IPNet. func (ipnet *IPNet) UnmarshalJSON(b []byte) (err error) { if string(b) == nullString { ipnet.IP = net.IP{} @@ -34,11 +37,15 @@ func (ipnet *IPNet) UnmarshalJSON(b []byte) (err error) { } var cidr string - err = json.Unmarshal(b, &cidr) + err = yaml.Unmarshal(b, &cidr) if err != nil { return err } + return ipnet.parseCIDR(cidr) +} + +func (ipnet *IPNet) parseCIDR(cidr string) (err error) { ip, net, err := net.ParseCIDR(cidr) if err != nil { return err @@ -47,3 +54,30 @@ func (ipnet *IPNet) UnmarshalJSON(b []byte) (err error) { ipnet.Mask = net.Mask return nil } + +// MarshalYAML interface for an IPNet. +func (ipnet *IPNet) MarshalYAML() (replacement interface{}, err error) { + if ipnet == nil || reflect.DeepEqual(ipnet.IPNet, emptyIPNet) { + return nil, nil + } + + return ipnet.String(), nil +} + +// UnmarshalYAML interface for an IPNet. 
+func (ipnet *IPNet) UnmarshalYAML(unmarshal func(interface{}) error) (err error) { + var data interface{} + err = unmarshal(&data) + if err != nil { + return err + } + + switch data.(type) { + case nil: + return nil + case string: + return ipnet.parseCIDR(data.(string)) + default: + return fmt.Errorf("cannot unmarshal %v into an IPNet", data) + } +} diff --git a/pkg/ipnet/ipnet_test.go b/pkg/ipnet/ipnet_test.go index 94b927d6f47..60090d6ced3 100644 --- a/pkg/ipnet/ipnet_test.go +++ b/pkg/ipnet/ipnet_test.go @@ -4,6 +4,8 @@ import ( "encoding/json" "net" "testing" + + yaml "gopkg.in/yaml.v2" ) func assertJSON(t *testing.T, data interface{}, expected string) { @@ -18,7 +20,19 @@ func assertJSON(t *testing.T, data interface{}, expected string) { } } -func TestMarshal(t *testing.T) { +func assertYAML(t *testing.T, data interface{}, expected string) { + actualBytes, err := yaml.Marshal(data) + if err != nil { + t.Fatal(err) + } + actual := string(actualBytes) + + if actual != expected { + t.Fatalf("%s != %s", actual, expected) + } +} + +func TestMarshalJSON(t *testing.T) { stdlibIPNet := &net.IPNet{ IP: net.IP{192, 168, 0, 10}, Mask: net.IPv4Mask(255, 255, 255, 0), @@ -30,7 +44,7 @@ func TestMarshal(t *testing.T) { assertJSON(t, nil, "null") } -func TestUnmarshal(t *testing.T) { +func TestUnmarshalJSON(t *testing.T) { for _, ipNetIn := range []*IPNet{ nil, {IPNet: net.IPNet{ @@ -60,3 +74,52 @@ func TestUnmarshal(t *testing.T) { }) } } + +func TestMarshalYAML(t *testing.T) { + stdlibIPNet := &net.IPNet{ + IP: net.IP{192, 168, 0, 10}, + Mask: net.IPv4Mask(255, 255, 255, 0), + } + assertYAML(t, stdlibIPNet, `ip: 192.168.0.10 +mask: +- 255 +- 255 +- 255 +- 0 +`) + wrappedIPNet := &IPNet{IPNet: *stdlibIPNet} + assertYAML(t, wrappedIPNet, "192.168.0.10/24\n") + assertYAML(t, &IPNet{}, "null\n") + assertYAML(t, nil, "null\n") +} + +func TestUnmarshalYAML(t *testing.T) { + for _, ipNetIn := range []*IPNet{ + nil, + {IPNet: net.IPNet{ + IP: net.IP{192, 168, 0, 10}, + Mask: 
net.IPv4Mask(255, 255, 255, 0), + }}, + } { + data, err := yaml.Marshal(ipNetIn) + if err != nil { + t.Fatal(err) + } + + t.Run(string(data), func(t *testing.T) { + var ipNetOut *IPNet + err := yaml.Unmarshal(data, &ipNetOut) + if err != nil { + t.Fatal(err) + } + + if ipNetIn == nil { + if ipNetOut != nil { + t.Fatalf("%v != %v", ipNetOut, ipNetIn) + } + } else if ipNetOut.String() != ipNetIn.String() { + t.Fatalf("%v != %v", ipNetOut, ipNetIn) + } + }) + } +} diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index 8f002f25c9f..494c6d633c6 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -10,57 +10,57 @@ import ( // InstallConfig is the configuration for an OpenShift install. type InstallConfig struct { // +optional - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline" yaml:",inline"` - metav1.ObjectMeta `json:"metadata"` + metav1.ObjectMeta `json:"metadata" yaml:"metadata"` // ClusterID is the ID of the cluster. - ClusterID string `json:"clusterID"` + ClusterID string `json:"clusterID" yaml:"clusterID"` // Admin is the configuration for the admin user. - Admin Admin `json:"admin"` + Admin Admin `json:"admin" yaml:"admin"` // BaseDomain is the base domain to which the cluster should belong. - BaseDomain string `json:"baseDomain"` + BaseDomain string `json:"baseDomain" yaml:"baseDomain"` // Networking defines the pod network provider in the cluster. - Networking `json:"networking"` + Networking `json:"networking" yaml:"networking"` // Machines is the list of MachinePools that need to be installed. - Machines []MachinePool `json:"machines"` + Machines []MachinePool `json:"machines" yaml:"machines"` // Platform is the configuration for the specific platform upon which to // perform the installation. - Platform `json:"platform"` + Platform `json:"platform" yaml:"platform"` // PullSecret is the secret to use when pulling images. 
- PullSecret string `json:"pullSecret"` + PullSecret string `json:"pullSecret,omitempty" yaml:"pullSecret,omitempty"` } // Admin is the configuration for the admin user. type Admin struct { // Email is the email address of the admin user. - Email string `json:"email"` + Email string `json:"email,omitempty" yaml:"email,omitempty"` // Password is the password of the admin user. - Password string `json:"password"` + Password string `json:"password,omitempty" yaml:"password,omitempty"` // SSHKey to use for the access to compute instances. - SSHKey string `json:"sshKey,omitempty"` + SSHKey string `json:"sshKey,omitempty" yaml:"sshKey,omitempty"` } // Platform is the configuration for the specific platform upon which to perform // the installation. Only one of the platform configuration should be set. type Platform struct { // AWS is the configuration used when installing on AWS. - AWS *AWSPlatform `json:"aws,omitempty"` + AWS *AWSPlatform `json:"aws,omitempty" yaml:"aws,omitempty"` // Libvirt is the configuration used when installing on libvirt. - Libvirt *LibvirtPlatform `json:"libvirt,omitempty"` + Libvirt *LibvirtPlatform `json:"libvirt,omitempty" yaml:"libvirt,omitempty"` } // Networking defines the pod network provider in the cluster. type Networking struct { - Type NetworkType `json:"type"` - ServiceCIDR ipnet.IPNet `json:"serviceCIDR"` - PodCIDR ipnet.IPNet `json:"podCIDR"` + Type NetworkType `json:"type,omitempty" yaml:"type,omitempty"` + ServiceCIDR ipnet.IPNet `json:"serviceCIDR" yaml:"serviceCIDR"` + PodCIDR ipnet.IPNet `json:"podCIDR" yaml:"podCIDR"` } // NetworkType defines the pod network provider in the cluster. @@ -77,40 +77,40 @@ const ( // all machinesets use. type AWSPlatform struct { // Region specifies the AWS region where the cluster will be created. - Region string `json:"region"` + Region string `json:"region,omitempty" yaml:"region,omitempty"` - // UserTags specifies additional tags for AWS resources created by the cluster. 
- UserTags map[string]string `json:"tags,omitempty"` + // UserTags specifies additional tags for AWS resources created for the cluster. + UserTags map[string]string `json:"userTags,omitempty" yaml:"userTags,omitempty"` // VPCID specifies the vpc to associate with the cluster. // If empty, new vpc will be created. // +optional - VPCID string `json:"vpcID"` + VPCID string `json:"vpcID,omitempty" yaml:"vpcID,omitempty"` // VPCCIDRBlock // +optional - VPCCIDRBlock string `json:"vpcCIDRBlock"` + VPCCIDRBlock string `json:"vpcCIDRBlock,omitempty" yaml:"vpcCIDRBlock,omitempty"` } // LibvirtPlatform stores all the global configuration that // all machinesets use. type LibvirtPlatform struct { // URI - URI string `json:"URI"` + URI string `json:"URI,omitempty" yaml:"URI,omitempty"` // Network - Network LibvirtNetwork `json:"network"` + Network LibvirtNetwork `json:"network" yaml:"network"` // MasterIPs - MasterIPs []net.IP `json:"masterIPs"` + MasterIPs []net.IP `json:"masterIPs,omitempty" yaml:"masterIPs,omitempty"` } // LibvirtNetwork is the configuration of the libvirt network. type LibvirtNetwork struct { // Name is the name of the nework. - Name string `json:"name"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` // IfName is the name of the network interface. - IfName string `json:"if"` + IfName string `json:"if,omitempty" yaml:"if,omitempty"` // IPRange is the range of IPs to use. - IPRange string `json:"ipRange"` + IPRange string `json:"ipRange,omitempty" yaml:"ipRange,omitempty"` } diff --git a/pkg/types/machinepools.go b/pkg/types/machinepools.go index 59b14181cfd..b86be170d26 100644 --- a/pkg/types/machinepools.go +++ b/pkg/types/machinepools.go @@ -3,23 +3,24 @@ package types // MachinePool is a pool of machines to be installed. type MachinePool struct { // Name is the name of the machine pool. - Name string `json:"name"` + Name string `json:"name" yaml:"name"` // Replicas is the count of machines for this machine pool. // Default is 1. 
- Replicas *int64 `json:"replicas"` + Replicas *int64 `json:"replicas,omitempty" yaml:"replicas,omitempty"` // Platform is configuration for machine pool specific to the platfrom. - Platform MachinePoolPlatform `json:"platform"` + Platform MachinePoolPlatform `json:"platform" yaml:"platform"` } // MachinePoolPlatform is the platform-specific configuration for a machine // pool. Only one of the platforms should be set. type MachinePoolPlatform struct { // AWS is the configuration used when installing on AWS. - AWS *AWSMachinePoolPlatform `json:"aws,omitempty"` + AWS *AWSMachinePoolPlatform `json:"aws,omitempty" yaml:"aws,omitempty"` + // Libvirt is the configuration used when installing on libvirt. - Libvirt *LibvirtMachinePoolPlatform `json:"libvirt,omitempty"` + Libvirt *LibvirtMachinePoolPlatform `json:"libvirt,omitempty" yaml:"libvirt,omitempty"` } // AWSMachinePoolPlatform stores the configuration for a machine pool @@ -27,29 +28,29 @@ type MachinePoolPlatform struct { type AWSMachinePoolPlatform struct { // InstanceType defines the ec2 instance type. // eg. m4-large - InstanceType string `json:"type"` + InstanceType string `json:"type,omitempty" yaml:"type,omitempty"` // IAMRoleName defines the IAM role associated // with the ec2 instance. - IAMRoleName string `json:"iamRoleName"` + IAMRoleName string `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty"` // EC2RootVolume defines the storage for ec2 instance. - EC2RootVolume `json:"rootVolume"` + EC2RootVolume `json:"rootVolume" yaml:"rootVolume"` } // EC2RootVolume defines the storage for an ec2 instance. type EC2RootVolume struct { // IOPS defines the iops for the instance. - IOPS int `json:"iops"` + IOPS int `json:"iops,omitempty" yaml:"iops,omitempty"` // Size defines the size of the instance. - Size int `json:"size"` + Size int `json:"size,omitempty" yaml:"size,omitempty"` // Type defines the type of the instance. 
- Type string `json:"type"` + Type string `json:"type,omitempty" yaml:"type,omitempty"` } // LibvirtMachinePoolPlatform stores the configuration for a machine pool // installed on libvirt. type LibvirtMachinePoolPlatform struct { // QCOWImagePath - QCOWImagePath string `json:"qcowImagePath"` + QCOWImagePath string `json:"qcowImagePath,omitempty" yaml:"qcowImagePath,omitempty"` } diff --git a/tests/run.sh b/tests/run.sh index 1a51e03d8c3..0c8b496d4a4 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -82,7 +82,7 @@ python <<-EOF >"${CLUSTER_NAME}.yaml" import yaml - with open('examples/tectonic.${BACKEND}.yaml') as f: + with open('examples/${BACKEND}.yaml') as f: config = yaml.load(f) config['name'] = '${CLUSTER_NAME}' with open(os.path.expanduser(os.path.join('~', '.ssh', 'id_rsa.pub'))) as f: