diff --git a/OWNERS b/OWNERS index 07b6cb50585..9e0e1584dcd 100644 --- a/OWNERS +++ b/OWNERS @@ -5,3 +5,4 @@ approvers: - installer-approvers reviewers: - installer-reviewers +component: Installer diff --git a/README.md b/README.md index e02214fa4d8..dfa15384aa1 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ First, install all [build dependencies](docs/dev/dependencies.md). -Clone this repository to `src/github.com/openshift/installer` in your [GOPATH](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable). Then build the `openshift-install` binary with: +Clone this repository. Then build the `openshift-install` binary with: ```sh hack/build.sh diff --git a/cmd/openshift-install/create.go b/cmd/openshift-install/create.go index 29c4034d11f..0776224c963 100644 --- a/cmd/openshift-install/create.go +++ b/cmd/openshift-install/create.go @@ -32,6 +32,7 @@ import ( assetstore "github.com/openshift/installer/pkg/asset/store" targetassets "github.com/openshift/installer/pkg/asset/targets" destroybootstrap "github.com/openshift/installer/pkg/destroy/bootstrap" + timer "github.com/openshift/installer/pkg/metrics/timer" "github.com/openshift/installer/pkg/types/baremetal" cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" ) @@ -96,6 +97,7 @@ var ( logrus.Fatal(errors.Wrap(err, "loading kubeconfig")) } + timer.StartTimer("Bootstrap Complete") err = waitForBootstrapComplete(ctx, config, rootOpts.dir) if err != nil { if err2 := logClusterOperatorConditions(ctx, config); err2 != nil { @@ -106,6 +108,8 @@ var ( } logrus.Fatal("Bootstrap failed to complete: ", err) } + timer.StopTimer("Bootstrap Complete") + timer.StartTimer("Bootstrap Destroy") if oi, ok := os.LookupEnv("OPENSHIFT_INSTALL_PRESERVE_BOOTSTRAP"); ok && oi != "" { logrus.Warn("OPENSHIFT_INSTALL_PRESERVE_BOOTSTRAP is set, not destroying bootstrap resources. 
" + @@ -117,6 +121,7 @@ var ( logrus.Fatal(err) } } + timer.StopTimer("Bootstrap Destroy") err = waitForInstallComplete(ctx, config, rootOpts.dir) if err != nil { @@ -125,6 +130,8 @@ var ( } logrus.Fatal(err) } + timer.StopTimer(timer.TotalTimeElapsed) + timer.LogSummary() }, }, assets: targetassets.Cluster, @@ -181,6 +188,8 @@ func runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args } return func(cmd *cobra.Command, args []string) { + timer.StartTimer(timer.TotalTimeElapsed) + cleanup := setupFileHook(rootOpts.dir) defer cleanup() @@ -249,6 +258,7 @@ func waitForBootstrapComplete(ctx context.Context, config *rest.Config, director apiTimeout := 20 * time.Minute logrus.Infof("Waiting up to %v for the Kubernetes API at %s...", apiTimeout, config.Host) + apiContext, cancel := context.WithTimeout(ctx, apiTimeout) defer cancel() // Poll quickly so we notice changes, but only log when the response @@ -257,10 +267,12 @@ func waitForBootstrapComplete(ctx context.Context, config *rest.Config, director logDownsample := 15 silenceRemaining := logDownsample previousErrorSuffix := "" + timer.StartTimer("API") wait.Until(func() { version, err := discovery.ServerVersion() if err == nil { logrus.Infof("API %s up", version) + timer.StopTimer("API") cancel() } else { silenceRemaining-- @@ -347,6 +359,7 @@ func waitForInitializedCluster(ctx context.Context, config *rest.Config) error { defer cancel() failing := configv1.ClusterStatusConditionType("Failing") + timer.StartTimer("Cluster Operators") var lastError string _, err = clientwatch.UntilWithSync( clusterVersionContext, @@ -362,6 +375,7 @@ func waitForInitializedCluster(ctx context.Context, config *rest.Config) error { return false, nil } if cov1helpers.IsStatusConditionTrue(cv.Status.Conditions, configv1.OperatorAvailable) { + timer.StopTimer("Cluster Operators") return true, nil } if cov1helpers.IsStatusConditionTrue(cv.Status.Conditions, failing) { @@ -413,6 +427,7 @@ func waitForConsole(ctx 
context.Context, config *rest.Config, directory string) // no route in a row (to show we're still alive). logDownsample := 15 silenceRemaining := logDownsample + timer.StartTimer("Console") wait.Until(func() { consoleRoutes, err := rc.RouteV1().Routes(consoleNamespace).List(metav1.ListOptions{}) if err == nil && len(consoleRoutes.Items) > 0 { @@ -445,6 +460,7 @@ func waitForConsole(ctx context.Context, config *rest.Config, directory string) if url == "" { return url, errors.New("could not get openshift-console URL") } + timer.StopTimer("Console") return url, nil } @@ -463,7 +479,7 @@ func logComplete(directory, consoleURL string) error { logrus.Info("Install complete!") logrus.Infof("To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=%s'", kubeconfig) logrus.Infof("Access the OpenShift web-console here: %s", consoleURL) - logrus.Infof("Login to the console with user: kubeadmin, password: %s", pw) + logrus.Infof("Login to the console with user: %q, and password: %q", "kubeadmin", pw) return nil } diff --git a/cmd/openshift-install/destroy.go b/cmd/openshift-install/destroy.go index 616d3631409..e813e095a3c 100644 --- a/cmd/openshift-install/destroy.go +++ b/cmd/openshift-install/destroy.go @@ -19,6 +19,7 @@ import ( _ "github.com/openshift/installer/pkg/destroy/openstack" _ "github.com/openshift/installer/pkg/destroy/ovirt" _ "github.com/openshift/installer/pkg/destroy/vsphere" + timer "github.com/openshift/installer/pkg/metrics/timer" "github.com/openshift/installer/pkg/terraform" ) @@ -54,6 +55,7 @@ func newDestroyClusterCmd() *cobra.Command { } func runDestroyCmd(directory string) error { + timer.StartTimer(timer.TotalTimeElapsed) destroyer, err := destroy.New(logrus.StandardLogger(), directory) if err != nil { return errors.Wrap(err, "Failed while preparing to destroy cluster") @@ -82,6 +84,8 @@ func runDestroyCmd(directory string) error { if err != nil && !os.IsNotExist(err) { return errors.Wrap(err, "failed to remove 
Terraform state") } + timer.StopTimer(timer.TotalTimeElapsed) + timer.LogSummary() return nil } @@ -95,10 +99,13 @@ func newDestroyBootstrapCmd() *cobra.Command { cleanup := setupFileHook(rootOpts.dir) defer cleanup() + timer.StartTimer(timer.TotalTimeElapsed) err := bootstrap.Destroy(rootOpts.dir) if err != nil { logrus.Fatal(err) } + timer.StopTimer(timer.TotalTimeElapsed) + timer.LogSummary() }, } } diff --git a/cmd/openshift-install/gather.go b/cmd/openshift-install/gather.go index a0060b12f30..842f39eba59 100644 --- a/cmd/openshift-install/gather.go +++ b/cmd/openshift-install/gather.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "io/ioutil" "net" "os" "path/filepath" @@ -20,6 +21,7 @@ import ( "github.com/openshift/installer/pkg/asset/installconfig" assetstore "github.com/openshift/installer/pkg/asset/store" + "github.com/openshift/installer/pkg/asset/tls" "github.com/openshift/installer/pkg/gather/ssh" "github.com/openshift/installer/pkg/terraform" gatheraws "github.com/openshift/installer/pkg/terraform/gather/aws" @@ -85,18 +87,35 @@ func newGatherBootstrapCmd() *cobra.Command { } func runGatherBootstrapCmd(directory string) error { - tfStateFilePath := filepath.Join(directory, terraform.StateFileName) - _, err := os.Stat(tfStateFilePath) - if os.IsNotExist(err) { - return unSupportedPlatformGather(directory) + assetStore, err := assetstore.NewStore(directory) + if err != nil { + return errors.Wrap(err, "failed to create asset store") + } + // add the default bootstrap key pair to the sshKeys list + bootstrapSSHKeyPair := &tls.BootstrapSSHKeyPair{} + if err := assetStore.Fetch(bootstrapSSHKeyPair); err != nil { + return errors.Wrapf(err, "failed to fetch %s", bootstrapSSHKeyPair.Name()) } + tmpfile, err := ioutil.TempFile("", "bootstrap-ssh") if err != nil { return err } + defer os.Remove(tmpfile.Name()) + if _, err := tmpfile.Write(bootstrapSSHKeyPair.Private()); err != nil { + return err + } + if err := tmpfile.Close(); err != nil { + return 
err + } + gatherBootstrapOpts.sshKeys = append(gatherBootstrapOpts.sshKeys, tmpfile.Name()) - assetStore, err := assetstore.NewStore(directory) + tfStateFilePath := filepath.Join(directory, terraform.StateFileName) + _, err = os.Stat(tfStateFilePath) + if os.IsNotExist(err) { + return unSupportedPlatformGather(directory) + } if err != nil { - return errors.Wrap(err, "failed to create asset store") + return err } config := &installconfig.InstallConfig{} @@ -136,7 +155,11 @@ func logGatherBootstrap(bootstrap string, port int, masters []string, directory if err := ssh.PullFileTo(client, fmt.Sprintf("/home/core/log-bundle-%s.tar.gz", gatherID), file); err != nil { return errors.Wrap(err, "failed to pull log file from remote") } - logrus.Infof("Bootstrap gather logs captured here %q", file) + path, err := filepath.Abs(file) + if err != nil { + return errors.Wrap(err, "failed to stat log file") + } + logrus.Infof("Bootstrap gather logs captured here %q", path) return nil } diff --git a/cmd/openshift-install/main.go b/cmd/openshift-install/main.go index e314f31d200..be1c8ee5fa0 100644 --- a/cmd/openshift-install/main.go +++ b/cmd/openshift-install/main.go @@ -54,6 +54,7 @@ func installerMain() { newVersionCmd(), newGraphCmd(), newCompletionCmd(), + newMigrateCmd(), } { rootCmd.AddCommand(subCmd) } diff --git a/cmd/openshift-install/migrate.go b/cmd/openshift-install/migrate.go new file mode 100644 index 00000000000..4c3d0a59ec0 --- /dev/null +++ b/cmd/openshift-install/migrate.go @@ -0,0 +1,23 @@ +package main + +import ( + "github.com/spf13/cobra" + + azure "github.com/openshift/installer/cmd/openshift-install/migrate/azure" +) + +func newMigrateCmd() *cobra.Command { + migrateCmd := &cobra.Command{ + Use: "migrate", + Short: "Do a migration", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return cmd.Help() + }, + } + + migrateCmd.AddCommand(azure.NewMigrateAzurePrivateDNSEligibleCmd()) + 
migrateCmd.AddCommand(azure.NewMigrateAzurePrivateDNSMigrateCmd()) + + return migrateCmd +} diff --git a/cmd/openshift-install/migrate/azure/eligible.go b/cmd/openshift-install/migrate/azure/eligible.go new file mode 100644 index 00000000000..60c352211e5 --- /dev/null +++ b/cmd/openshift-install/migrate/azure/eligible.go @@ -0,0 +1,23 @@ +package azure + +import ( + "github.com/spf13/cobra" + + azmigrate "github.com/openshift/installer/pkg/migrate/azure" +) + +func runMigrateAzurePrivateDNSEligibleCmd(cmd *cobra.Command, args []string) error { + return azmigrate.Eligible() +} + +// NewMigrateAzurePrivateDNSEligibleCmd adds the eligible command to openshift-install +func NewMigrateAzurePrivateDNSEligibleCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "azure-privatedns-eligible", + Short: "Show legacy Azure zones that are eligible to be migrated", + Long: "This will show legacy Azure private zones that can be migrated to new private zones.", + RunE: runMigrateAzurePrivateDNSEligibleCmd, + } + + return cmd +} diff --git a/cmd/openshift-install/migrate/azure/migrate.go b/cmd/openshift-install/migrate/azure/migrate.go new file mode 100644 index 00000000000..04144634020 --- /dev/null +++ b/cmd/openshift-install/migrate/azure/migrate.go @@ -0,0 +1,53 @@ +package azure + +import ( + "github.com/pkg/errors" + "github.com/spf13/cobra" + + azmigrate "github.com/openshift/installer/pkg/migrate/azure" +) + +var ( + azureMigrateOpts struct { + zone string + resourceGroup string + virtualNetwork string + vnetResourceGroup string + link bool + } +) + +func runMigrateAzurePrivateDNSMigrateCmd(cmd *cobra.Command, args []string) error { + if azureMigrateOpts.zone == "" { + return errors.New("zone is a required argument") + } + if azureMigrateOpts.resourceGroup == "" { + return errors.New("resource-group is a required argument") + } + if azureMigrateOpts.link == true && azureMigrateOpts.virtualNetwork == "" { + return errors.New("link requires virtual-network to be set") + } + 
if azureMigrateOpts.virtualNetwork != "" && azureMigrateOpts.vnetResourceGroup == "" { + return errors.New("virtual-network requires virtual-network-resource-group to be set") + } + + return azmigrate.Migrate(azureMigrateOpts.resourceGroup, azureMigrateOpts.zone, azureMigrateOpts.virtualNetwork, azureMigrateOpts.vnetResourceGroup, azureMigrateOpts.link) +} + +// NewMigrateAzurePrivateDNSMigrateCmd adds the migrate command to openshift-install +func NewMigrateAzurePrivateDNSMigrateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "azure-privatedns", + Short: "Migrate a legacy Azure zone", + Long: "This will migrate a legacy Azure private zone to a new style private zone.", + RunE: runMigrateAzurePrivateDNSMigrateCmd, + } + + cmd.PersistentFlags().StringVar(&azureMigrateOpts.zone, "zone", "", "The zone to migrate") + cmd.PersistentFlags().StringVar(&azureMigrateOpts.resourceGroup, "resource-group", "", "The resource group of the zone") + cmd.PersistentFlags().StringVar(&azureMigrateOpts.virtualNetwork, "virtual-network", "", "The virtual network to create the private zone in") + cmd.PersistentFlags().StringVar(&azureMigrateOpts.vnetResourceGroup, "virtual-network-resource-group", "", "The resource group the virtual network is in") + cmd.PersistentFlags().BoolVar(&azureMigrateOpts.link, "link", false, "Link the newly created private zone to the virtual network") + + return cmd +} diff --git a/cmd/openshift-install/waitfor.go b/cmd/openshift-install/waitfor.go index a33a6b1d10d..9a7628570ad 100644 --- a/cmd/openshift-install/waitfor.go +++ b/cmd/openshift-install/waitfor.go @@ -4,6 +4,7 @@ import ( "context" "path/filepath" + timer "github.com/openshift/installer/pkg/metrics/timer" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -34,6 +35,7 @@ func newWaitForBootstrapCompleteCmd() *cobra.Command { Short: "Wait until cluster bootstrapping has completed", Args: cobra.ExactArgs(0), Run: func(_ *cobra.Command, _ []string) { + 
timer.StartTimer(timer.TotalTimeElapsed) ctx := context.Background() cleanup := setupFileHook(rootOpts.dir) @@ -43,7 +45,7 @@ func newWaitForBootstrapCompleteCmd() *cobra.Command { if err != nil { logrus.Fatal(errors.Wrap(err, "loading kubeconfig")) } - + timer.StartTimer("Bootstrap Complete") err = waitForBootstrapComplete(ctx, config, rootOpts.dir) if err != nil { if err2 := logClusterOperatorConditions(ctx, config); err2 != nil { @@ -56,6 +58,9 @@ func newWaitForBootstrapCompleteCmd() *cobra.Command { } logrus.Info("It is now safe to remove the bootstrap resources") + timer.StopTimer("Bootstrap Complete") + timer.StopTimer(timer.TotalTimeElapsed) + timer.LogSummary() }, } } @@ -66,6 +71,7 @@ func newWaitForInstallCompleteCmd() *cobra.Command { Short: "Wait until the cluster is ready", Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, args []string) { + timer.StartTimer(timer.TotalTimeElapsed) ctx := context.Background() cleanup := setupFileHook(rootOpts.dir) @@ -84,6 +90,8 @@ func newWaitForInstallCompleteCmd() *cobra.Command { logrus.Fatal(err) } + timer.StopTimer(timer.TotalTimeElapsed) + timer.LogSummary() }, } } diff --git a/data/data/aws/bootstrap/main.tf b/data/data/aws/bootstrap/main.tf index c5e473ea60d..b8633d924f3 100644 --- a/data/data/aws/bootstrap/main.tf +++ b/data/data/aws/bootstrap/main.tf @@ -135,7 +135,7 @@ resource "aws_instance" "bootstrap" { tags = merge( { - "Name" = "${var.cluster_id}-bootstrap" + "Name" = "${var.cluster_id}-bootstrap" }, var.tags, ) @@ -150,7 +150,7 @@ resource "aws_instance" "bootstrap" { volume_tags = merge( { - "Name" = "${var.cluster_id}-bootstrap-vol" + "Name" = "${var.cluster_id}-bootstrap-vol" }, var.tags, ) @@ -174,7 +174,7 @@ resource "aws_security_group" "bootstrap" { tags = merge( { - "Name" = "${var.cluster_id}-bootstrap-sg" + "Name" = "${var.cluster_id}-bootstrap-sg" }, var.tags, ) diff --git a/data/data/aws/iam/main.tf b/data/data/aws/iam/main.tf index eceb9ec0809..f4368c4d5a8 100644 --- 
a/data/data/aws/iam/main.tf +++ b/data/data/aws/iam/main.tf @@ -39,6 +39,13 @@ EOF } resource "aws_iam_role_policy" "worker_policy" { + + // List curated from https://github.com/kubernetes/cloud-provider-aws#readme, minus entries specific to EKS + // integrations. + // This list should not be updated any further without an operator owning migrating changes here for existing + // clusters. + // Please see: docs/dev/aws/iam_permissions.md + name = "${var.cluster_id}-worker-policy" role = aws_iam_role.worker_role.id @@ -48,7 +55,10 @@ resource "aws_iam_role_policy" "worker_policy" { "Statement": [ { "Effect": "Allow", - "Action": "ec2:Describe*", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions" + ], "Resource": "*" } ] diff --git a/data/data/aws/main.tf b/data/data/aws/main.tf index 284c7bbb0c3..755793d5940 100644 --- a/data/data/aws/main.tf +++ b/data/data/aws/main.tf @@ -10,15 +10,22 @@ locals { provider "aws" { region = var.aws_region - # Validation of AWS Bahrain region was added in AWS TF provider v2.22 - # so we skip when installing in me-south-1. - skip_region_validation = var.aws_region == "me-south-1" + skip_region_validation = var.aws_skip_region_validation + + endpoints { + ec2 = lookup(var.custom_endpoints, "ec2", null) + elb = lookup(var.custom_endpoints, "elasticloadbalancing", null) + iam = lookup(var.custom_endpoints, "iam", null) + route53 = lookup(var.custom_endpoints, "route53", null) + s3 = lookup(var.custom_endpoints, "s3", null) + sts = lookup(var.custom_endpoints, "sts", null) + } } module "bootstrap" { source = "./bootstrap" - ami = var.aws_ami + ami = var.aws_region == var.aws_ami_region ? 
var.aws_ami : aws_ami_copy.imported[0].id instance_type = var.aws_bootstrap_instance_type cluster_id = var.cluster_id ignition = var.ignition_bootstrap @@ -26,7 +33,7 @@ module "bootstrap" { target_group_arns = module.vpc.aws_lb_target_group_arns target_group_arns_length = module.vpc.aws_lb_target_group_arns_length vpc_id = module.vpc.vpc_id - vpc_cidrs = module.vpc.vpc_cidrs + vpc_cidrs = var.machine_v4_cidrs vpc_security_group_ids = [module.vpc.master_sg_id] volume_kms_key_id = var.aws_master_root_volume_kms_key_id publish_strategy = var.aws_publish_strategy @@ -53,7 +60,7 @@ module "masters" { root_volume_kms_key_id = var.aws_master_root_volume_kms_key_id target_group_arns = module.vpc.aws_lb_target_group_arns target_group_arns_length = module.vpc.aws_lb_target_group_arns_length - ec2_ami = var.aws_ami + ec2_ami = var.aws_region == var.aws_ami_region ? var.aws_ami : aws_ami_copy.imported[0].id user_data_ign = var.ignition_master publish_strategy = var.aws_publish_strategy } @@ -84,7 +91,7 @@ module "dns" { module "vpc" { source = "./vpc" - cidr_block = var.machine_cidr + cidr_blocks = var.machine_v4_cidrs cluster_id = var.cluster_id region = var.aws_region vpc = var.aws_vpc @@ -102,3 +109,20 @@ module "vpc" { tags = local.tags } +resource "aws_ami_copy" "imported" { + count = var.aws_region != var.aws_ami_region ? 
1 : 0 + name = "${var.cluster_id}-master" + source_ami_id = var.aws_ami + source_ami_region = var.aws_ami_region + encrypted = true + + tags = merge( + { + "Name" = "${var.cluster_id}-ami-${var.aws_region}" + "sourceAMI" = var.aws_ami + "sourceRegion" = var.aws_ami_region + }, + local.tags, + ) +} + diff --git a/data/data/aws/master/main.tf b/data/data/aws/master/main.tf index a1ccccfd8f6..2d7cf0159e7 100644 --- a/data/data/aws/master/main.tf +++ b/data/data/aws/master/main.tf @@ -43,6 +43,13 @@ EOF } resource "aws_iam_role_policy" "master_policy" { + + // List curated from https://github.com/kubernetes/cloud-provider-aws#readme, minus entries specific to EKS + // integrations. + // This list should not be updated any further without an operator owning migrating changes here for existing + // clusters. + // Please see: docs/dev/aws/iam_permissions.md + name = "${var.cluster_id}-master-policy" role = aws_iam_role.master_role.id @@ -51,24 +58,57 @@ resource "aws_iam_role_policy" "master_policy" { "Version": "2012-10-17", "Statement": [ { - "Action": "ec2:*", - "Resource": "*", - "Effect": "Allow" - }, - { - "Action": "iam:PassRole", - "Resource": "*", - "Effect": "Allow" - }, - { - "Action" : [ - "s3:GetObject" + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + 
"elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "kms:DescribeKey" ], - "Resource": "arn:${data.aws_partition.current.partition}:s3:::*", - "Effect": "Allow" - }, - { - "Action": "elasticloadbalancing:*", "Resource": "*", "Effect": "Allow" } @@ -81,17 +121,17 @@ EOF resource "aws_network_interface" "master" { count = var.instance_count subnet_id = var.az_to_subnet_id[var.availability_zones[count.index]] - + security_groups = var.master_sg_ids - + tags = merge( { - "Name" = "${var.cluster_id}-master-${count.index}" + "Name" = "${var.cluster_id}-master-${count.index}" }, var.tags, ) } - + resource "aws_instance" "master" { count = var.instance_count ami = var.ec2_ami @@ -114,7 +154,7 @@ resource "aws_instance" "master" { tags = merge( { - "Name" = "${var.cluster_id}-master-${count.index}" + "Name" = 
"${var.cluster_id}-master-${count.index}" }, var.tags, ) @@ -129,7 +169,7 @@ resource "aws_instance" "master" { volume_tags = merge( { - "Name" = "${var.cluster_id}-master-${count.index}-vol" + "Name" = "${var.cluster_id}-master-${count.index}-vol" }, var.tags, ) diff --git a/data/data/aws/variables-aws.tf b/data/data/aws/variables-aws.tf index faa3c2c92f8..859c4f4972b 100644 --- a/data/data/aws/variables-aws.tf +++ b/data/data/aws/variables-aws.tf @@ -7,21 +7,39 @@ EOF default = "1.0" } +variable "custom_endpoints" { + type = map(string) + + description = <` | The VIP to use for internal API communication. | @@ -200,14 +224,14 @@ The `dnsVIP` setting has no default and must always be provided. ##### Describing Hosts The `hosts` parameter is a list of separate bare metal assets that -should be used to build the cluster. +should be used to build the cluster. The number of assets must be at least greater or equal to the sum of the configured `ControlPlane` and `compute` `Replicas`. | Name | Default | Description | | --- | --- | --- | -| `name` | | The name of the `BareMetalHost` resource to associate with the details. | +| `name` | | The name of the `BareMetalHost` resource to associate with the details. It must be unique. | | `role` | | Either `master` or `worker`. | | `bmc` | | Connection details for the baseboard management controller. See below for details. | -| `bootMACAddress` | | The MAC address of the NIC the host will use to boot on the provisioning network. | +| `bootMACAddress` | | The MAC address of the NIC the host will use to boot on the provisioning network. It must be unique. | The `bmc` parameter for each host is a set of values for accessing the baseboard management controller in the host. @@ -216,7 +240,7 @@ baseboard management controller in the host. | --- | --- | --- | | `username` | | The username for authenticating to the BMC | | `password` | | The password associated with `username`. 
| -| `address` | | The URL for communicating with the BMC controller, based on the provider being used. See [BMC Addressing](#bmc-addressing) for details. | +| `address` | | The URL for communicating with the BMC controller, based on the provider being used. See [BMC Addressing](#bmc-addressing) for details. It must be unique. | ##### BMC Addressing diff --git a/docs/user/openstack/README.md b/docs/user/openstack/README.md index b29f0ec706a..c37dc20efa6 100644 --- a/docs/user/openstack/README.md +++ b/docs/user/openstack/README.md @@ -50,16 +50,18 @@ In order to run the latest version of the installer in OpenStack, at a bare mini For a successful installation it is required: -- Floating IPs: 2 +- Floating IPs: 2 (plus one that will be created and destroyed by the Installer during the installation process) - Security Groups: 3 - Security Group Rules: 60 - Routers: 1 - Subnets: 1 +- Server Groups: 1 - RAM: 112 GB - vCPUs: 28 - Volume Storage: 175 GB - Instances: 7 - Depending on the type of [image registry backend](#image-registry-requirements) either 1 Swift container or an additional 100 GB volume. +- OpenStack resource tagging You may need to increase the security group related quotas from their default values. For example (as an OpenStack administrator): @@ -508,7 +510,7 @@ curl https://:22623/config/master --insecure If you ran the installer with a [custom CA certificate](#self-signed-openstack-ca-certificates), then this certificate can be changed while the cluster is running. To change your certificate, edit the value of the `ca-cert.pem` key in the `cloud-provider-config` configmap with a valid PEM certificate. 
```sh -oc edit -n openshift-config cloud-provider-config +oc edit configmap -n openshift-config cloud-provider-config ``` ## Reporting Issues diff --git a/docs/user/openstack/customization.md b/docs/user/openstack/customization.md index e5eb094fa9c..f3717a78449 100644 --- a/docs/user/openstack/customization.md +++ b/docs/user/openstack/customization.md @@ -10,6 +10,8 @@ Beyond the [platform-agnostic `install-config.yaml` properties](../customization * [Minimal](#minimal) * [Custom-machine-pools](#custom-machine-pools) * [Image Overrides](#image-overrides) +* [Additional Networks](#additional-networks) +* [Additional Security Groups](#additional-security-groups) * [Further customization](#further-customization) ## Cluster-scoped properties @@ -24,14 +26,21 @@ Beyond the [platform-agnostic `install-config.yaml` properties](../customization * `region` (deprecated string): The OpenStack region where the cluster will be created. Currently this value is not used by the installer. * `trunkSupport` (optional string): Whether OpenStack ports can be trunked (`1` for true or `0` for false) * `clusterOSImage` (optional string): Either a URL with `http(s)` or `file` scheme to override the default OS image for cluster nodes or an existing Glance image name. +* `apiVIP` (optional string): An IP address on the machineNetwork that will be assigned to the API VIP. Be aware that the `10` and `11` of the machineNetwork will be taken by neutron dhcp by default, and won't be available. +* `ingressVIP` (optional string): An IP address on the machineNetwork that will be assigned to the ingress VIP. Be aware that the `10` and `11` of the machineNetwork will be taken by neutron dhcp by default, and won't be available. +* `machinesSubnet` (optional string): the UUID of an openstack subnet to install the nodes of the cluster onto. The first CIDR in `networks.machineNetwork` must match the CIDR of the `machinesSubnet`. 
Also note that setting `externalDNS` while setting `machinesSubnet` is invalid usage. If you want to add a DNS to your cluster while using a custom subnet, add it to the subnet in openstack [like this](https://docs.openstack.org/neutron/rocky/admin/config-dns-res.html). ## Machine pools +* `additionalNetworkIDs` (optional list of strings): IDs of additional networks for machines. +* `additionalSecurityGroupIDs` (optional list of strings): IDs of additional security groups for machines. * `type` (optional string): The OpenStack flavor name for machines in the pool. * `rootVolume` (optional object): Defines the root volume for instances in the machine pool. The instances use ephemeral disks if not set. * `size` (required integer): Size of the root volume in GB. * `type` (required string): The volume pool to create the volume from. +**NOTE:** The bootstrap node follows the `type` and `rootVolume` parameters from the `controlPlane` machine pool. + ## Examples Some example `install-config.yaml` are shown below. @@ -128,6 +137,66 @@ platform: clusterOSImage: my-rhcos ``` +## Additional Networks + +You can set additional networks for your machines by defining `additionalNetworkIDs` parameter in the machine configuration. The parameter is a list of strings with additional network IDs: + +```yaml +additionalNetworkIDs: +- +- +``` + +You can attach this parameter for both `controlPlane` and `compute` machines: + +Example: + +```yaml +compute: +- name: worker + platform: + openstack: + additionalNetworkIDs: + - fa806b2f-ac49-4bce-b9db-124bc64209bf +controlPlane: + name: master + platform: + openstack: + additionalNetworkIDs: + - fa806b2f-ac49-4bce-b9db-124bc64209bf +``` + +**NOTE:** Allowed address pairs won't be created for the additional networks. + +## Additional Security Groups + +You can set additional security groups for your machines by defining `additionalSecurityGroupIDs` parameter in the machine configuration. 
The parameter is a list of strings with additional security group IDs: + +```yaml +additionalSecurityGroupIDs: +- +- +``` + +You can attach this parameter for both `controlPlane` and `compute` machines: + +Example: + +```yaml +compute: +- name: worker + platform: + openstack: + additionalSecurityGroupIDs: + - 7ee219f3-d2e9-48a1-96c2-e7429f1b0da7 +controlPlane: + name: master + platform: + openstack: + additionalSecurityGroupIDs: + - 7ee219f3-d2e9-48a1-96c2-e7429f1b0da7 +``` + ## Further customization For customizing the installation beyond what is possible with `openshift-install`, refer to the [UPI (User Provided Infrastructure) documentation](./install_upi.md). diff --git a/docs/user/openstack/install_upi.md b/docs/user/openstack/install_upi.md index 74ff3a9a54b..03d625f575b 100644 --- a/docs/user/openstack/install_upi.md +++ b/docs/user/openstack/install_upi.md @@ -662,7 +662,7 @@ In this section we'll create all the networking pieces necessary to host the Ope ### Security Groups ```sh -$ ansible-playbook -i inventory.yaml 01_security-groups.yaml +$ ansible-playbook -i inventory.yaml security-groups.yaml ``` The playbook creates one Security group for the Control Plane and one for the Compute nodes, then attaches rules for enabling communication between the nodes. @@ -670,7 +670,7 @@ The playbook creates one Security group for the Control Plane and one for the Co ### Network, Subnet and external router ```sh -$ ansible-playbook -i inventory.yaml 02_network.yaml +$ ansible-playbook -i inventory.yaml network.yaml ``` The playbook creates a network and a subnet. The subnet obeys `os_subnet_range`; however the first ten IP addresses are removed from the allocation pool. These addresses will be used for the VRRP addresses managed by keepalived for high availability. For more information, read the [networking infrastructure design document][net-infra]. 
@@ -698,7 +698,7 @@ $ openstack subnet set --dns-nameserver <198.51.100.86> --dns-nameserver <198.51 ## Bootstrap ```sh -$ ansible-playbook -i inventory.yaml 03_bootstrap.yaml +$ ansible-playbook -i inventory.yaml bootstrap.yaml ``` The playbook sets the *allowed address pairs* on each port attached to our OpenShift nodes. @@ -723,7 +723,7 @@ $ ssh core@203.0.113.24 ## Control Plane ```sh -$ ansible-playbook -i inventory.yaml 04_control-plane.yaml +$ ansible-playbook -i inventory.yaml control-plane.yaml ``` Our control plane will consist of three nodes. The servers will be passed the `master-?-ignition.json` files prepared earlier. @@ -776,7 +776,7 @@ $ oc get pods -A ### Delete the Bootstrap Resources ```sh -$ ansible-playbook -i inventory.yaml down-03_bootstrap.yaml +$ ansible-playbook -i inventory.yaml down-bootstrap.yaml ``` The teardown playbook deletes the bootstrap port, server and floating IP address. @@ -786,7 +786,7 @@ If you haven't done so already, you should also disable the bootstrap Ignition U ## Compute Nodes ```sh -$ ansible-playbook -i inventory.yaml 05_compute-nodes.yaml +$ ansible-playbook -i inventory.yaml compute-nodes.yaml ``` This process is similar to the masters, but the workers need to be approved before they're allowed to join the cluster. @@ -891,14 +891,17 @@ Upon success, it will print the URL to the OpenShift Console (the web UI) as wel ```sh $ ansible-playbook -i inventory.yaml \ - down-03_bootstrap.yaml \ - down-04_control-plane.yaml \ - down-05_compute-nodes.yaml \ - down-06_load-balancers.yaml \ - down-02_network.yaml \ - down-01_security-groups.yaml + down-bootstrap.yaml \ + down-control-plane.yaml \ + down-compute-nodes.yaml \ + down-load-balancers.yaml \ + down-network.yaml \ + down-security-groups.yaml ``` -The playbook `down-06_load-balancers.yaml` idempotently deletes the load balancers created by the Kuryr installation, if any. 
+The playbook `down-load-balancers.yaml` idempotently deletes the load balancers created by the Kuryr installation, if any. + +**NOTE:** The deletion of load balancers with `provisioning_status` `PENDING-*` is skipped. Make sure to retry the +`down-load-balancers.yaml` playbook once the load balancers have transitioned to `ACTIVE`. Then, remove the `api` and `*.apps` DNS records. diff --git a/docs/user/openstack/known-issues.md b/docs/user/openstack/known-issues.md index 4dc0efe534d..12acdcaf102 100644 --- a/docs/user/openstack/known-issues.md +++ b/docs/user/openstack/known-issues.md @@ -10,6 +10,14 @@ If the mDNS service name of a server is too long, it will exceed the character l Since the installer requires the *Name* of your external network and Red Hat Core OS image, if you have other networks or images with the same name, it will choose one randomly from the set. This is not a reliable way to run the installer. We highly recommend that you resolve this with your cluster administrator by creating unique names for your resources in openstack. +## Extended installation times + +Depending on the infrastructure performance, the installation may take longer than what the global installer timeout expects. In those cases, the installer will fail, but the cluster might still converge to a working state. In case of timeout, if such a case is suspected, it is advised to check the cluster health manually after some time: + +```shell +$ openshift-install wait-for install-complete +``` + ## External Network Overlap If your external network's CIDR range is the same as one of the default network ranges, then you will need to change the matching network range by running the installer with a custom `install-config.yaml`. If users are experiencing unusual networking problems, please contact your cluster administrator and validate that none of your network CIDRs are overlapping with the external network. 
We were unfortunately unable to support validation for this due to a lack of support in gophercloud, and even if we were, it is likely that the CIDR range of the floating ip would only be accessible to cluster administrators. The default network CIDRs are as follows: diff --git a/docs/user/power/install_upi.md b/docs/user/power/install_upi.md new file mode 100644 index 00000000000..247fcdc6647 --- /dev/null +++ b/docs/user/power/install_upi.md @@ -0,0 +1,296 @@ +# Install: Power User Provided Infrastructure + +The steps for performing a UPI-based install are outlined here. Example automation is provided [here](#example-power-upi-configuration) to help model your own. + +## Table of contents + +1. [Minimum resource requirements](#minimum-resource-requirements) + +2. [Network topology requirements](#network-topology-requirements) + +3. [DNS requirements](#dns-requirements) + +4. [Getting Ignition configs for machines](#getting-ignition-configs-for-machines) + +5. [Booting machines with RHCOS and Ignition configs](#booting-machines-with-rhcos-and-ignition-configs) + +6. [Watching your installation (bootstrap complete, cluster available)](#watching-your-installation) + +7. [Example Power UPI configuration](#example-power-upi-configuration) + +## Compute + +The smallest OpenShift 4.x clusters require the following hosts: + +* 1 bootstrap machine. + +* 3 control plane machines. + +* at least 1 worker machine. + +NOTE: The cluster requires the bootstrap machine to deploy the OpenShift cluster onto the 3 control plane machines, and you can remove the bootstrap machine. + +All the machines must use Red Hat Enterprise Linux CoreOS (RHCOS) as the operating system. 
+ +### Minimum resource requirements + +| Machine | Operating System | vCPU | RAM | Storage | +|---------------|------------------|------|-------|---------| +| Bootstrap | RHCOS | 2 | 16 GB | 120 GB | +| Control Plane | RHCOS | 2 | 16 GB | 120 GB | +| Compute | RHCOS | 2 | 16 GB | 120 GB | + + + +## Network Topology Requirements + +The easiest way to get started is to ensure all Power nodes have internet access to pull images for platform containers and provide telemetry data to Red Hat. +OpenShift 4.x also supports air-gapped installs + +### Load balancers + +Before you install OpenShift, you must provision two load balancers. + +* A load balancer for the control plane and bootstrap machines that targets port 6443 (Kubernetes APIServer) and 22623([Machine Config server][machine-config-server]). Port 6443 must be accessible to both clients external to the cluster and nodes within the cluster, and port 22623 must be accessible to nodes within the cluster. + + NOTE: Bootstrap machine can be deleted after cluster installation is finished. + +* A load balancer for the machines that run the [ingress router][openshift-router] pods that balances ports 443 and 80. Both the ports must be accessible to both clients external to the cluster and nodes within the cluster. + + NOTE: A working configuration for the ingress router is required for an OpenShift 4.x cluster. + + NOTE: The default configuration for Cluster Ingress Operator deploys the ingress router to `worker` nodes in the cluster. The administrator needs to configure the [ingress][openshift-router] after the control plane has been bootstrapped. + +### Connectivity between machines + +You must configure the network connectivity between machines to allow cluster components to communicate. + +* etcd + + As the etcd members are located on the control plane machines. 
Each control plane machine requires connectivity to [etcd server][etcd-ports], [etcd peer][etcd-ports] and [etcd-metrics][etcd-ports] on every other control plane machine. + +* OpenShift SDN + + All the machines require connectivity to certain reserved ports on every other machine to establish in-cluster networking. For further detail, please refer to the following [documentation][sdn-ports]. + +* Kubernetes NodePort + + All the machines require connectivity to Kubernetes NodePort range 30000-32767 on every other machine for OpenShift platform components. + +* OpenShift reserved + + All the machines require connectivity to reserved port ranges 10250-12252 and 9000-9999 on every other machine for OpenShift platform components. + +## DNS requirements + +* Kubernetes API + + OpenShift 4.x requires the DNS records `api.$cluster_name.$base_domain` and `api-int.$cluster_name.$base_domain` to point to the load balancer targeting the control plane machines. Both records must be resolvable from all the nodes within the cluster. The `api.$cluster_name.$base_domain` must also be resolvable by clients external to the cluster. + +* etcd + + For each control plane machine, OpenShift 4.x requires DNS records `etcd-$idx.$cluster_name.$base_domain` to point to `$idx`'th control plane machine. The DNS record must resolve to a unicast IPv4 address for the control plane machine and the records must be resolvable from all the nodes in the cluster. + + For each control plane machine, OpenShift 4.x also requires an SRV DNS record for etcd server on that machine with priority `0`, weight `10` and port `2380`. For a 3 control plane cluster, the records look like: + + ```plain + # _service._proto.name. TTL class SRV priority weight port target. + _etcd-server-ssl._tcp.$cluster_name.$base_domain 86400 IN SRV 0 10 2380 etcd-0.$cluster_name.$base_domain. + _etcd-server-ssl._tcp.$cluster_name.$base_domain 86400 IN SRV 0 10 2380 etcd-1.$cluster_name.$base_domain. 
+ _etcd-server-ssl._tcp.$cluster_name.$base_domain 86400 IN SRV 0 10 2380 etcd-2.$cluster_name.$base_domain. + ``` + +* OpenShift Routes + + OpenShift 4.x requires the DNS record `*.apps.$cluster_name.$base_domain` to point to the load balancer targeting the machines running the ingress router pods. This record must be resolvable by both clients external to the cluster and from all the nodes within the cluster. + +## Getting Ignition configs for machines + +The OpenShift Installer provides administrators various assets that are required to create an OpenShift cluster, namely: + +* Ignition configs: The OpenShift Installer provides Ignition configs that should be used to configure the RHCOS based bootstrap and control plane machines using `bootstrap.ign` and `master.ign` respectively. The OpenShift Installer also provides `worker.ign` that can be used to configure the RHCOS based `worker` machines. + +* Admin Kubeconfig: The OpenShift Installer provides a kubeconfig with admin level privileges to Kubernetes APIServer. + + NOTE: This kubeconfig is configured to use `api.$cluster_name.$base_domain` DNS name to communicate with the Kubernetes APIServer. + +### Setting up install-config for installer + +The OpenShift installer uses an [Install Config](../customization.md#platform-customization) to drive all install time configuration. + +An example install config for bare-metal UPI is as follows: + +```yaml +apiVersion: v1 +## The base domain of the cluster. All DNS records will be sub-domains of this base and will also include the cluster name. +baseDomain: example.com +compute: +- name: worker + replicas: 1 +controlPlane: + name: master + replicas: 3 +metadata: + ## The name for the cluster + name: test +platform: + none: {} +## The pull secret that provides components in the cluster access to images for OpenShift components. +pullSecret: '' +## The default SSH key that will be programmed for `core` user. 
+sshKey: '' +``` + +Create a directory that will be used by the OpenShift installer to provide all the assets. For example `test-bare-metal`, + +```console +$ mkdir test-bare-metal +$ tree test-bare-metal +test-bare-metal + +0 directories, 0 files +``` + +Copy *your* `install-config` to the `INSTALL_DIR`. For example using the `test-bare-metal` as our `INSTALL_DIR`, + +```console +$ cp <your-install-config> test-bare-metal/install-config.yaml +$ tree test-bare-metal +test-bare-metal +└── install-config.yaml + +0 directories, 1 file +``` + +NOTE: The filename for `install-config` in the `INSTALL_DIR` must be `install-config.yaml` + +### Invoking the installer to get Ignition configs + +Given that you have set up the `INSTALL_DIR` with the appropriate `install-config`, you can create the Ignition configs by using the `create ignition-configs` target. For example, + +```console +$ openshift-install --dir test-bare-metal create ignition-configs +INFO Consuming "Install Config" from target directory +$ tree test-bare-metal +test-bare-metal +├── auth +│   └── kubeconfig +├── bootstrap.ign +├── master.ign +└── worker.ign + +1 directory, 4 files +``` + +The `bootstrap.ign`, `master.ign`, and `worker.ign` files must be made available as http/https file downloads resolvable by the RHCOS nodes. + +## Booting machines with RHCOS and Ignition configs + +### Required kernel parameters for boot +A kernel parameter file must be created for each node with the following parameters: + +* `rd.neednet=1`: [CoreOS Installer][coreos-installer] needs internet access to fetch the OS image that needs to be installed on the machine. + +* IP configuration [arguments](https://docs.openshift.com/container-platform/4.3/installing/installing_bare_metal/installing-bare-metal-network-customizations.html#network-customization-config-yaml_installing-bare-metal-network-customizations) may be required to access the network. 
+ +* CoreOS Installer [arguments][coreos-installer-args] are required to be configured to install RHCOS and set up the Ignition config file for that machine. + +* Refer to the following docs for details on booting a PowerVM machine + - [iso boot](https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/Power+Systems/page/Mounting+an+ISO+image+on+VIO+client+LPAR) + - [network boot](https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/Power+Systems/page/How+to+initiate+network+boot+of+an+LPAR) + +## Watching your installation + +### Monitor for bootstrap-complete + +The administrators can use the `wait-for bootstrap-complete` target of the OpenShift Installer to monitor cluster bootstrapping. The command succeeds when it notices the `bootstrap-complete` event from Kubernetes APIServer. This event is generated by the bootstrap machine after the Kubernetes APIServer has been bootstrapped on the control plane machines. For example, + +```console +$ openshift-install --dir test-bare-metal wait-for bootstrap-complete +INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443... +INFO API v1.16.2 up +INFO Waiting up to 30m0s for bootstrapping to complete... +``` + +### Configure Image Registry Storage Provisioner + + +The Cluster Image Registry [Operator][cluster-image-registry-operator] does not pick a storage backend for the `None` platform. Therefore, the cluster operator will be stuck in progressing because it is waiting for the administrator to [configure][cluster-image-registry-operator-configuration] a storage backend for the image-registry. +[NFS][openshift-nfs] should be picked as a [storage-backend][nfs-storage-backend]. + + +#### Configuring NFS + +To make an existing NFS share accessible for OpenShift to use as persistent storage, users must first attach it as a Persistent Volume. At least 100GB of NFS storage space must be available for the image registry claim. 
+ +``` +apiVersion: v1 +kind: PersistentVolume +spec: + accessModes: + - ReadWriteMany + - ReadWriteOnce + capacity: + storage: 100Gi + nfs: + path: + server: + persistentVolumeReclaimPolicy: Recycle + volumeMode: Filesystem +status: {} +``` + +Once the persistent volume is created, the image registry must be patched to use it. + +```sh +oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"pvc":{"claim":""}}, "managementState": "Managed"}}' +``` + +#### Configuring Local Storage (testing/development only) + +Alternatively, for non-production clusters, `emptyDir` can be used for testing instead of NFS. + +```sh +oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}, "managementState": "Managed"}}' +``` + + +## Monitor for cluster completion + +The administrators can use the `wait-for install-complete` target of the OpenShift Installer to monitor cluster completion. The command succeeds when it notices that Cluster Version Operator has completed rolling out the OpenShift cluster from Kubernetes APIServer. + +```console +$ openshift-install wait-for install-complete +INFO Waiting up to 30m0s for the cluster to initialize... +``` + +## Example Power UPI configuration + +An [example terraform configuration](https://github.com/ppc64le/ocp4_upi_powervm) for deploying a +self-contained, development/testing cluster on Power is available. This example +configuration demonstrates a minimal set of infrastructure services to bring +up a running cluster. It is not a production-ready configuration. + +The repository includes examples of the following user-provided components, +which are intended to serve as a guide for designing a user's cluster +topology. 
+ +* DNS +* Load Balancing +* DHCP +* File Server (for Ignition configs) + +[cluster-image-registry-operator-configuration]: https://github.com/openshift/cluster-image-registry-operator#registry-resource +[cluster-image-registry-operator]: https://github.com/openshift/cluster-image-registry-operator#image-registry-operator +[coreos-installer-args]: https://github.com/coreos/coreos-installer#kernel-command-line-options-for-coreos-installer-running-in-the-initramfs +[coreos-installer]: https://github.com/coreos/coreos-installer#coreos-installer +[csr-requests]: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/#requesting-a-certificate +[etcd-ports]: https://github.com/openshift/origin/pull/21520 +[machine-config-server]: https://github.com/openshift/machine-config-operator/blob/master/docs/MachineConfigServer.md +[openshift-router]: https://github.com/openshift/cluster-ingress-operator#openshift-ingress-operator +[rrdns]: https://tools.ietf.org/html/rfc1794 +[sdn-ports]: https://github.com/openshift/origin/pull/21520 +[upi-metal-example-pre-req]: ../../../upi/metal/README.md#pre-requisites +[upi-metal-example]: ../../../upi/metal/README.md +[openshift-nfs]: https://docs.openshift.com/container-platform/4.3/storage/persistent_storage/persistent-storage-nfs.html +[nfs-storage-backend]: https://docs.openshift.com/container-platform/4.3/registry/configuring_registry_storage/configuring-registry-storage-baremetal.html diff --git a/go.mod b/go.mod index 7bcc8f77fa9..e445d170fd3 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,6 @@ require ( github.com/h2non/filetype v1.0.12 github.com/hashicorp/go-azure-helpers v0.10.0 github.com/hashicorp/go-plugin v1.0.1 - github.com/hashicorp/go-retryablehttp v0.6.4 // indirect github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/memberlist v0.1.5 // indirect github.com/hashicorp/serf v0.8.5 // indirect @@ -63,16 +62,16 @@ require ( github.com/metal3-io/cluster-api-provider-baremetal v0.0.0 github.com/mitchellh/cli 
v1.0.0 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect - github.com/openshift-metal3/terraform-provider-ironic v0.1.9 + github.com/openshift-metal3/terraform-provider-ironic v0.2.0 github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible - github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240 + github.com/openshift/client-go v0.0.0-20200320150128-a906f3d8e723 github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156 github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200128081049-840376ca5c09 - github.com/openshift/library-go v0.0.0-20200210105614-4bf528465627 - github.com/openshift/machine-api-operator v0.2.1-0.20200310180732-c63fa2b143f0 + github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81 + github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290 github.com/openshift/machine-config-operator v4.2.0-alpha.0.0.20190917115525-033375cbe820+incompatible github.com/ovirt/go-ovirt v4.3.4+incompatible github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20200122105935-65b89ad00553 @@ -106,8 +105,8 @@ require ( go.uber.org/atomic v1.5.1 // indirect go.uber.org/multierr v1.4.0 // indirect go.uber.org/zap v1.13.0 // indirect - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f + golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 + golang.org/x/lint v0.0.0-20200302205851-738671d3881b golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect @@ -116,11 +115,11 @@ require ( gopkg.in/AlecAivazis/survey.v1 
v1.8.9-0.20200217094205-6773bdf39b7f gopkg.in/ini.v1 v1.51.0 gopkg.in/yaml.v2 v2.2.8 - k8s.io/api v0.17.2 - k8s.io/apimachinery v0.17.3 + k8s.io/api v0.18.0 + k8s.io/apimachinery v0.18.0 k8s.io/client-go v12.0.0+incompatible k8s.io/klog v1.0.0 - k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc + k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 sigs.k8s.io/cluster-api-provider-aws v0.0.0 sigs.k8s.io/cluster-api-provider-azure v0.0.0 sigs.k8s.io/cluster-api-provider-openstack v0.0.0 @@ -134,7 +133,8 @@ replace ( github.com/hashicorp/terraform-plugin-sdk => github.com/openshift/hashicorp-terraform-plugin-sdk v1.6.0-openshift // Pin to fork with public rpc types github.com/metal3-io/baremetal-operator => github.com/openshift/baremetal-operator v0.0.0-20200206190020-71b826cc0f0a // Use OpenShift fork github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d // Pin OpenShift fork - github.com/openshift/api => github.com/openshift/api v0.0.0-20200210091934-a0e53e94816b // Pin API + github.com/openshift/api => github.com/openshift/api v0.0.0-20200413201024-c6e8c9b6eb9a // Pin API + github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240 // Pin client-go github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20200130220348-e5685c0cf530 // Pin MCO so it doesn't get downgraded github.com/terraform-providers/terraform-provider-azurerm => github.com/openshift/terraform-provider-azurerm v1.41.1-openshift-3 // Pin to openshift fork with IPv6 fixes google.golang.org/api => google.golang.org/api v0.13.0 // Pin to version required by tf-provider-google @@ -162,5 +162,5 @@ replace ( k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.17.1 // Replaced by MCO/CRI-O sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200316201703-923caeb1d0d8 // Pin OpenShift fork 
sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b // Pin OpenShift fork - sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20200221124403-d699c3611b0c // Pin OpenShift fork + sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20200323110431-3311de91e078 // Pin OpenShift fork ) diff --git a/go.sum b/go.sum index aa86171d3c9..61a62af9720 100644 --- a/go.sum +++ b/go.sum @@ -551,6 +551,7 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= @@ -558,6 +559,7 @@ github.com/go-lintpack/lintpack v0.5.2 h1:DI5mA3+eKdWeJ40nU4d6Wc26qmdG8RCi/btYq0 github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-log/log v0.1.1-0.20181211034820-a514cf01a3eb/go.mod h1:4mBwpdRMFLiuXZDCwU2lKQFsoSCo72j3HqBK9d81N2M= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= 
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -778,6 +780,8 @@ github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598 github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM= +github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= @@ -1729,6 +1733,8 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1737,6 +1743,7 @@ github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= @@ -1758,18 +1765,20 @@ github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pK github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/openshift-metal3/terraform-provider-ironic v0.1.9 h1:8Ylp+ylSX/NtuGel90Ty9Fyg+mq523B2orH1FtC+fG0= -github.com/openshift-metal3/terraform-provider-ironic v0.1.9/go.mod h1:gYfx1ioglruoE8BpF0h8MVbcAAFu6ruz+bD2X9VC0uU= -github.com/openshift/api v0.0.0-20200210091934-a0e53e94816b h1:BERD6sZj7w9Tt0RBpuw87AC0+SppyxEUgUG/Of5rI+E= -github.com/openshift/api v0.0.0-20200210091934-a0e53e94816b/go.mod h1:fT6U/JfG8uZzemTRwZA2kBDJP5nWz7v05UHnty/D+pk= +github.com/openshift-metal3/terraform-provider-ironic v0.2.0 h1:MAImxv6UaTtvf2BkPG9YS+EvIqMsXQhNQNDfV7FE2D0= +github.com/openshift-metal3/terraform-provider-ironic v0.2.0/go.mod h1:G79T6t60oBpYfZK/x960DRzYsNHdz5YVCHINx6QlmtU= +github.com/openshift/api v0.0.0-20200413201024-c6e8c9b6eb9a h1:fIIKps4VKnxrXSp3lhgSatm5C1xb1qfMtJsmyr3iMXw= +github.com/openshift/api v0.0.0-20200413201024-c6e8c9b6eb9a/go.mod h1:RKMJ5CBnljLfnej+BJ/xnOWc3kZDvJUaIAEq2oKSPtE= github.com/openshift/baremetal-operator v0.0.0-20200206190020-71b826cc0f0a h1:65ZuRkPnQGh9uo0z93KosrPlwEWJNxUjxnuM9lyGBHc= 
github.com/openshift/baremetal-operator v0.0.0-20200206190020-71b826cc0f0a/go.mod h1:cXwn0hhgHpORjBasg0RnZwhKaJGy9+r6qgj0HCXrs/Y= github.com/openshift/build-machinery-go v0.0.0-20200205161356-ef115f5adc73/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= +github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= github.com/openshift/client-go v0.0.0-20190617165122-8892c0adc000/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= -github.com/openshift/client-go v0.0.0-20191001081553-3b0e988f8cb0 h1:U0rtkdPj1lTC5iQwN3Ev+FgyZUTsJFg60rc0ExTGLpc= github.com/openshift/client-go v0.0.0-20191001081553-3b0e988f8cb0/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240 h1:XYfJWv2Ch+qInGLDEedHRtDsJwnxyU1L8U7SY56NcA8= github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240/go.mod h1:4riOwdj99Hd/q+iAcJZfNCsQQQMwURnZV6RL4WHYS5w= +github.com/openshift/client-go v0.0.0-20200320150128-a906f3d8e723 h1:FfrELmZ9N9NtVE15qmTRkJIETX75QHdr65xiuTKvNYo= +github.com/openshift/client-go v0.0.0-20200320150128-a906f3d8e723/go.mod h1:wNBSSt4RZTHhUWyhBE3gxTR32QpF9DB2SfS14u2IxuE= github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e h1:2gyl9UVyjHSWzdS56KUXxQwIhENbq2x2olqoMQSA/C8= github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e/go.mod h1:iPn+uhIe7nkP5BMHe2QnbLtg5m/AIQ1xvz9s3cig5ss= github.com/openshift/cluster-api v0.0.0-20190805113604-f8de78af80fc/go.mod h1:mNsD1dsD4T57kV4/C6zTHke/Ro166xgnyyRZqkamiEU= @@ -1788,10 +1797,8 @@ github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e71 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156/go.mod h1:KCyjaBfEkifs9bqV1HEXDJUyQylgeLSqiqt2QnMn7is= github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 h1:MC6BSZYxFPoqqKj9PdlGjHGVKcMsvn6Kv1NiVzQErZ8= 
github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603/go.mod h1:7pQ9Bzha+ug/5zd+0ufbDEcnn2OnNlPwRwYrzhXk4NM= -github.com/openshift/cluster-api-provider-openstack v0.0.0-20200130125124-ef82ce374112 h1:ny9quEJfUBkUyalPn4sRBX86Ci5Jx72pIO2Qjsxy0yA= -github.com/openshift/cluster-api-provider-openstack v0.0.0-20200130125124-ef82ce374112/go.mod h1:ntMRKZlv++TExGO4g2jgsVIaHKJt8kKe72BAvMPV5vA= -github.com/openshift/cluster-api-provider-openstack v0.0.0-20200221124403-d699c3611b0c h1:Rn/Ip2nbWUhvOF9/EZaorxKVcQnm427cSOJQJIFXuHQ= -github.com/openshift/cluster-api-provider-openstack v0.0.0-20200221124403-d699c3611b0c/go.mod h1:ntMRKZlv++TExGO4g2jgsVIaHKJt8kKe72BAvMPV5vA= +github.com/openshift/cluster-api-provider-openstack v0.0.0-20200323110431-3311de91e078 h1:Irj9ROcWhbeH6t2DEUDIpdIJgSLBaXww6AP/FMCmGmw= +github.com/openshift/cluster-api-provider-openstack v0.0.0-20200323110431-3311de91e078/go.mod h1:ntMRKZlv++TExGO4g2jgsVIaHKJt8kKe72BAvMPV5vA= github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200128081049-840376ca5c09 h1:QJxGgIB7f5BqNPEZOCgV29NsDf1P439Bs3q0B5O3fP8= github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200128081049-840376ca5c09/go.mod h1:NcvJT99IauPosghKCineBG8yswe9JjuddiWuvsqge64= github.com/openshift/cluster-autoscaler-operator v0.0.0-20190521201101-62768a6ba480/go.mod h1:/XmV44Fh28Vo3Ye93qFrxAbcFJ/Uy+7LPD+jGjmfJYc= @@ -1806,10 +1813,15 @@ github.com/openshift/library-go v0.0.0-20190619114638-6b58b672ee58/go.mod h1:NBt github.com/openshift/library-go v0.0.0-20191003152030-97c62d8a2901/go.mod h1:NBttNjZpWwup/nthuLbPAPSYC8Qyo+BBK5bCtFoyYjo= github.com/openshift/library-go v0.0.0-20200210105614-4bf528465627 h1:Rs1RtB123VJr+kqXBYOTERNp23tZUUZ6w1gWrkroH3M= github.com/openshift/library-go v0.0.0-20200210105614-4bf528465627/go.mod h1:T+sDdW3J/cgxUSqPdAwmhFrJhfFRv1ZtCSTVY59phN4= +github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81 h1:bNUcSdyoACkjI2USyvDbAMb6lCtghdz563b0bfhPC8A= 
+github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81/go.mod h1:Qc5duoXHzAKyUfA0REIlG/rdfWzknOpp9SiDiyg5Y7A= github.com/openshift/machine-api-operator v0.0.0-20190312153711-9650e16c9880/go.mod h1:7HeAh0v04zQn1L+4ItUjvpBQYsm2Nf81WaZLiXTcnkc= +github.com/openshift/machine-api-operator v0.2.0 h1:g+EIEZrbbE0C2zC2x4nddgA2oxqP/3sjkEGdiX2qNe8= github.com/openshift/machine-api-operator v0.2.1-0.20191128180243-986b771e661d/go.mod h1:9qQPF00anuIsc6RiHYfHE0+cZZImbvFNLln0NRBVVMg= github.com/openshift/machine-api-operator v0.2.1-0.20200310180732-c63fa2b143f0 h1:Na0422T5qq9e4AtBqH4hyqujESg29Akrf2asy/kc02U= github.com/openshift/machine-api-operator v0.2.1-0.20200310180732-c63fa2b143f0/go.mod h1:b3huCV+DbroXP1sHtsU5xBwx97zqc6GKB5owyl2zsNM= +github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290 h1:0QnZvWW2X/4fCmIlOWsm3FmHZnsh2sCBfsQE/ujGhsw= +github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290/go.mod h1:QkhH+/6BXabl+4HmiLwx9/bmW1ieCGF9km7xz22Ozl0= github.com/openshift/machine-config-operator v0.0.1-0.20200130220348-e5685c0cf530 h1:r9eSp963LcaLw3YUyJHMHwZYXoaGXOc2MOKVQQrdRmw= github.com/openshift/machine-config-operator v0.0.1-0.20200130220348-e5685c0cf530/go.mod h1:z3udws7UDLBp233iGbayvpZEwhWn74K9xzjDtCGJlok= github.com/openshift/runtime-utils v0.0.0-20191011150825-9169de69ebf6/go.mod h1:5gDRVvQwesU7cfwlpuMivdv3Dz/oslvv2qTBHCy4wqQ= @@ -1847,7 +1859,6 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/performancecopilot/speed 
v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1973,7 +1984,6 @@ github.com/seccomp/containers-golang v0.3.1/go.mod h1:ZUNmbYf+/7mfX5qYV07/krJnTd github.com/seccomp/libseccomp-golang v0.9.0/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= -github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83 h1:AtnWoOvTioyDXFvu96MWEeE8qj4COSQnJogzLy/u41A= github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= @@ -2036,7 +2046,6 @@ github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/go-diff v0.5.1 h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -2047,7 +2056,6 @@ github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.2.2 
h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -2056,7 +2064,6 @@ github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -2070,7 +2077,6 @@ github.com/spf13/viper v1.3.0/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stathat/go v1.0.0/go.mod 
h1:+9Eg2szqkcOGWv6gfheJmBBsmq9Qf5KDbzy8/aYYR0c= @@ -2082,7 +2088,6 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -2091,7 +2096,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= @@ -2123,7 +2127,6 @@ github.com/terraform-providers/terraform-provider-vsphere v1.16.2/go.mod h1:yTPD github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= 
-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= @@ -2135,7 +2138,6 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tombuildsstuff/giovanni v0.7.1 h1:QJG5TJNIjcRbMsaQGF1HtWEpZbu8xLAOmZuMIk7wf14= github.com/tombuildsstuff/giovanni v0.7.1/go.mod h1:Xu/XU+DiRrKTDoCnJNGuh9ysD0eJyi/zU/naFh2aN9I= -github.com/tommy-muehle/go-mnd v1.1.1 h1:4D0wuPKjOTiK2garzuPGGvm4zZ/wLYDOH8TJSABC7KU= github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -2152,9 +2154,7 @@ github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4A github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/funlen v0.0.2 h1:Av96YVBwwNSe4MLR7iI/BIa3VyI7/djnto/pK3Uxbdo= github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/unrolled/secure 
v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= @@ -2164,7 +2164,6 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/uudashr/gocognit v1.0.1 h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs= github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= @@ -2191,7 +2190,6 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.21.0/go.mod h1:zbnFoBQ9GIjs2RVETy8CNEpb+L+Lwkjs3XZUL0B3/m0= -github.com/vmware/govmomi v0.22.1 h1:ZIEYmBdAS2i+s7RctapqdHfbeGiUcL8LRN05uS4TfPc= github.com/vmware/govmomi v0.22.1/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/govmomi v0.22.2 h1:hmLv4f+RMTTseqtJRijjOWzwELiaLMIoHv2D6H3bF4I= github.com/vmware/govmomi v0.22.2/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= @@ -2319,6 +2317,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto 
v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181112044915-a3060d491354/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2346,8 +2346,9 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190607214518-6fa95d984e88/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -2410,7 +2411,6 @@ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2508,6 +2508,7 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191002091554-b397fe3ad8ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab h1:FvshnhkKW+LO3HWHodML8kuVX8rnJTxKm9dFPuI68UM= @@ -2523,6 +2524,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2629,9 +2631,8 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191204011308-9611592c72f6/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200102140908-9497f49d5709/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868 h1:6VZw2h4iwEB4GwgQU3Jvcsm8l9+yReTrErAEK1k6AC4= golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204192400-7124308813f3 h1:Ms82wn6YK4ZycO6Bxyh0kxX3gFFVGo79CCuc52xgcys= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204192400-7124308813f3/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200214201135-548b770e2dfa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2 h1:0sfSpGSa544Fwnbot3Oxq/U6SXqjty6Jy/3wRhVS7ig= @@ -2645,6 +2646,7 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.0.0-20190915125329-975d99cd20a9/go.mod 
h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.0.0-20190929233944-b20cf7805fc4 h1:80AnKo1DpT8nqeykLyDUg+tN4ayNoz5i6bA57C15BYc= gonum.org/v1/gonum v0.0.0-20190929233944-b20cf7805fc4/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -2660,6 +2662,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.4/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -2754,10 +2757,10 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 h1:B0J02caTR6tpSJozBJyiAzT6CtBzjclw4pgm9gg8Ys0= gopkg.in/yaml.v3 
v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= @@ -2785,6 +2788,7 @@ k8s.io/client-go v0.17.1 h1:LbbuZ5tI7OYx4et5DfRFcJuoojvpYO0c7vps2rgJsHY= k8s.io/client-go v0.17.1/go.mod h1:HZtHJSC/VuSHcETN9QA5QDZky1tXiYrkF/7t7vRpO1A= k8s.io/cloud-provider v0.17.1/go.mod h1:QM00lVsYDC7gfXmrSCmiVVmRNk6zE8ciiuqskXDsjMM= k8s.io/cluster-bootstrap v0.17.1/go.mod h1:bp4yDMvUBdGyYJoT2mLAb+WGgkouUanvrEyWEu7mJes= +k8s.io/code-generator v0.17.1 h1:e3B1UqRzRUWygp7WD+QTRT3ZUahPIaRKF0OFa7duQwI= k8s.io/code-generator v0.17.1/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.1/go.mod h1:LrBPZkXtlvGjBzDJa0+b7E5Ij4VoAAKrOGudRC5z2eY= k8s.io/cri-api v0.17.1/go.mod h1:BzAkbBHHp81d+aXzbiIcUbilLkbXa40B8mUHOk6EX3s= @@ -2792,6 +2796,7 @@ k8s.io/csi-translation-lib v0.17.1/go.mod h1:EWeHQJcexqar6avuUocMwEJOYkboteNM9OD k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190907103519-ebc107f98eab h1:j4L8spMe0tFfBvvW6lrc0c+Ql8+nnkcV3RYfi3eSwGY= k8s.io/gengo v0.0.0-20190907103519-ebc107f98eab/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -2827,18 +2832,18 @@ k8s.io/utils v0.0.0-20190923111123-69764acb6e8e/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils 
v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54= k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= -mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f h1:Cq7MalBHYACRd6EesksG1Q8EoIAKOsiZviGKbOLIej4= mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -2852,6 +2857,8 @@ sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZw 
sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= sigs.k8s.io/controller-tools v0.2.2-0.20190930215132-4752ed2de7d2/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= +sigs.k8s.io/controller-tools v0.2.9-0.20200331153640-3c5446d407dd h1:q2VrviTVgpbV4H+J8XjXnFtuGCkgED3M9tcykEgN7c4= +sigs.k8s.io/controller-tools v0.2.9-0.20200331153640-3c5446d407dd/go.mod h1:D2LzYpGDYjxaAALDVYAwaqaKp2fNuyO5yfOBoU/cbBE= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= @@ -2863,10 +2870,11 @@ sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190107175209-d9ea5c54f7dc/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod 
h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/hack/update-rhcos-bootimage.py b/hack/update-rhcos-bootimage.py index 56da03a33b7..24237dfe55f 100755 --- a/hack/update-rhcos-bootimage.py +++ b/hack/update-rhcos-bootimage.py @@ -34,7 +34,7 @@ } newmeta['baseURI'] = urllib.parse.urljoin(args.meta, '.') -with open(os.path.join(metadata_dir, f"rhcos-{args.arch}.json"), 'w') as f: +with open(os.path.join(metadata_dir, 'rhcos-{}.json'.format(args.arch)), 'w') as f: json.dump(newmeta, f, sort_keys=True, indent=4) # Continue to populate the legacy metadata file because there are still diff --git a/images/installer/Dockerfile.upi.ci b/images/installer/Dockerfile.upi.ci index bfad7d212f2..7111fe5877b 100644 --- a/images/installer/Dockerfile.upi.ci +++ b/images/installer/Dockerfile.upi.ci @@ -19,24 +19,32 @@ COPY --from=builder /go/src/github.com/openshift/installer/data/data/rhcos.json RUN rpm --import https://packages.microsoft.com/keys/microsoft.asc RUN sh -c 'echo -e "[azure-cli]\nname=Azure CLI\nbaseurl=https://packages.microsoft.com/yumrepos/azure-cli\nenabled=1\ngpgcheck=1\ngpgkey=https://packages.microsoft.com/keys/microsoft.asc" >/etc/yum.repos.d/azure-cli.repo' -RUN yum install --setopt=tsflags=nodocs -y \ - gettext \ - openssh-clients \ - azure-cli \ - openssl && \ - yum update -y && \ +RUN sh -c 'echo -e "[google-cloud-sdk]\nname=Google Cloud SDK\nbaseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64\nenabled=1\ngpgcheck=1\nrepo_gpgcheck=1\ngpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg\n https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg" > /etc/yum.repos.d/google-cloud-sdk.repo' + +RUN yum update -y && \ yum install --setopt=tsflags=nodocs -y \ - unzip gzip jq util-linux && \ - yum clean all && rm -rf 
/var/cache/yum/* && \ + azure-cli \ + gettext \ + google-cloud-sdk \ + gzip \ + jq \ + unzip \ + openssh-clients \ + openssl \ + pyOpenSSL \ + PyYAML \ + util-linux && \ + yum clean all && \ + rm -rf /var/cache/yum/* && \ chmod g+w /etc/passwd -ENV TERRAFORM_VERSION=0.11.11 +ENV TERRAFORM_VERSION=0.12.24 RUN curl -O https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -d /bin/ -ENV MATCHBOX_VERSION=v0.2.3 -RUN curl -L -O https://github.com/poseidon/terraform-provider-matchbox/releases/download/${MATCHBOX_VERSION}/terraform-provider-matchbox-${MATCHBOX_VERSION}-linux-amd64.tar.gz && \ - tar xzf terraform-provider-matchbox-${MATCHBOX_VERSION}-linux-amd64.tar.gz && \ - mv terraform-provider-matchbox-${MATCHBOX_VERSION}-linux-amd64/terraform-provider-matchbox /bin/terraform-provider-matchbox +ENV MATCHBOX_PROVIDER_VERSION=v0.3.0 +RUN curl -L -O https://github.com/poseidon/terraform-provider-matchbox/releases/download/${MATCHBOX_PROVIDER_VERSION}/terraform-provider-matchbox-${MATCHBOX_PROVIDER_VERSION}-linux-amd64.tar.gz && \ + tar xzf terraform-provider-matchbox-${MATCHBOX_PROVIDER_VERSION}-linux-amd64.tar.gz && \ + mv terraform-provider-matchbox-${MATCHBOX_PROVIDER_VERSION}-linux-amd64/terraform-provider-matchbox /bin/terraform-provider-matchbox RUN curl -L -O https://github.com/vmware/govmomi/releases/download/v0.20.0/govc_linux_amd64.gz && \ gzip -d govc_linux_amd64.gz && \ chmod +x govc_linux_amd64 && mv govc_linux_amd64 /bin/govc diff --git a/images/openstack/Dockerfile.ci b/images/openstack/Dockerfile.ci index 9c817a2d186..6710414aa98 100644 --- a/images/openstack/Dockerfile.ci +++ b/images/openstack/Dockerfile.ci @@ -16,7 +16,8 @@ COPY --from=builder /go/src/github.com/openshift/installer/images/openstack/rdo- # Install Dependendencies for tests # 
https://github.com/openshift/origin/blob/6114cbc507bf18890f009f16ee424a62007bc390/images/tests/Dockerfile.rhel -RUN yum install --setopt=tsflags=nodocs -y git gzip util-linux && yum clean all && rm -rf /var/cache/yum/* && \ +RUN yum install --setopt=tsflags=nodocs -y git gzip util-linux && \ + yum clean all && rm -rf /var/cache/yum/* && \ localedef -c -f UTF-8 -i en_US en_US.UTF-8 && \ git config --system user.name test && \ git config --system user.email test@example.com && \ @@ -27,11 +28,17 @@ RUN yum update -y && \ python-openstackclient ansible python-openstacksdk python-netaddr unzip && \ yum clean all && rm -rf /var/cache/yum/* +# The Continuous Integration machinery relies on Route53 for DNS while testing the cluster. RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ unzip awscliv2.zip && \ ./aws/install -b /bin && \ rm -rf ./aws awscliv2.zip +# The Continuous Integration machinery relies on jq to parse JSON configuration files. +RUN curl -sSo /etc/yum.repos.d/rdo-trunk-runtime-deps.repo https://trunk.rdoproject.org/centos7/rdo-trunk-runtime-deps.repo && \ + yum install --setopt=tsflags=nodocs --disablerepo='*' --enablerepo=delorean-master-testing -y jq && \ + yum clean all && rm -rf /var/cache/yum/* + RUN mkdir /output && chown 1000:1000 /output USER 1000:1000 ENV PATH /bin diff --git a/pkg/asset/cluster/aws/aws.go b/pkg/asset/cluster/aws/aws.go index df5952417de..5ad02450446 100644 --- a/pkg/asset/cluster/aws/aws.go +++ b/pkg/asset/cluster/aws/aws.go @@ -23,6 +23,7 @@ func Metadata(clusterID, infraID string, config *types.InstallConfig) *awstypes. 
}, { "openshiftClusterID": clusterID, }}, + ServiceEndpoints: config.AWS.ServiceEndpoints, } } diff --git a/pkg/asset/cluster/cluster.go b/pkg/asset/cluster/cluster.go index 4448350f7ed..a2ba985d6cb 100644 --- a/pkg/asset/cluster/cluster.go +++ b/pkg/asset/cluster/cluster.go @@ -14,6 +14,7 @@ import ( "github.com/openshift/installer/pkg/asset/cluster/aws" "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/password" + "github.com/openshift/installer/pkg/metrics/timer" "github.com/openshift/installer/pkg/terraform" ) @@ -81,6 +82,8 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) { } } + timer.StartTimer("Infrastructure") + stateFile, err := terraform.Apply(tmpDir, installConfig.Config.Platform.Name(), extraArgs...) if err != nil { err = errors.Wrap(err, "failed to create cluster") @@ -104,6 +107,7 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) { logrus.Errorf("Failed to read tfstate: %v", err2) } + timer.StopTimer("Infrastructure") return err } diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index 3d20d99a42c..6bf342a9a77 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -2,16 +2,19 @@ package cluster import ( "context" + "encoding/base64" "encoding/json" "fmt" "io/ioutil" "net" + "net/url" "os" + "strings" igntypes "github.com/coreos/ignition/config/v2_2/types" gcpprovider "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" - vsphereprovider "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1" + vsphereprovider "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" "github.com/pkg/errors" "github.com/sirupsen/logrus" awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1beta1" @@ -30,6 +33,7 @@ import ( 
"github.com/openshift/installer/pkg/asset/machines" "github.com/openshift/installer/pkg/asset/openshiftinstall" "github.com/openshift/installer/pkg/asset/rhcos" + "github.com/openshift/installer/pkg/asset/tls" "github.com/openshift/installer/pkg/tfvars" awstfvars "github.com/openshift/installer/pkg/tfvars/aws" azuretfvars "github.com/openshift/installer/pkg/tfvars/azure" @@ -89,6 +93,7 @@ func (t *TerraformVariables) Dependencies() []asset.Asset { &machine.Master{}, &machines.Master{}, &machines.Worker{}, + &tls.RootCA{}, } } @@ -103,7 +108,8 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { workersAsset := &machines.Worker{} rhcosImage := new(rhcos.Image) rhcosBootstrapImage := new(rhcos.BootstrapImage) - parents.Get(clusterID, installConfig, bootstrapIgnAsset, masterIgnAsset, mastersAsset, workersAsset, rhcosImage, rhcosBootstrapImage) + rootCA := &tls.RootCA{} + parents.Get(clusterID, installConfig, bootstrapIgnAsset, masterIgnAsset, mastersAsset, workersAsset, rhcosImage, rhcosBootstrapImage, rootCA) platform := installConfig.Config.Platform.Name() switch platform { @@ -126,12 +132,22 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { } } + machineV4CIDRs, machineV6CIDRs := []string{}, []string{} + for _, network := range installConfig.Config.Networking.MachineNetwork { + if network.CIDR.IPNet.IP.To4() != nil { + machineV4CIDRs = append(machineV4CIDRs, network.CIDR.IPNet.String()) + } else { + machineV6CIDRs = append(machineV6CIDRs, network.CIDR.IPNet.String()) + } + } + masterCount := len(mastersAsset.MachineFiles) data, err := tfvars.TFVars( clusterID.InfraID, installConfig.Config.ClusterDomain(), installConfig.Config.BaseDomain, - &installConfig.Config.Networking.MachineNetwork[0].CIDR.IPNet, + machineV4CIDRs, + machineV6CIDRs, useIPv4, useIPv6, bootstrapIgn, @@ -199,13 +215,22 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { for i, m := range workers { workerConfigs[i] = 
m.Spec.Template.Spec.ProviderSpec.Value.Object.(*awsprovider.AWSMachineProviderConfig) } + osImage := strings.SplitN(string(*rhcosImage), ",", 2) + osImageID := osImage[0] + osImageRegion := installConfig.Config.AWS.Region + if len(osImage) == 2 { + osImageRegion = osImage[1] + } data, err := awstfvars.TFVars(awstfvars.TFVarsSources{ VPC: vpc, PrivateSubnets: privateSubnets, PublicSubnets: publicSubnets, + Services: installConfig.Config.AWS.ServiceEndpoints, Publish: installConfig.Config.Publish, MasterConfigs: masterConfigs, WorkerConfigs: workerConfigs, + AMIID: osImageID, + AMIRegion: osImageRegion, }) if err != nil { return errors.Wrapf(err, "failed to get %s Terraform variables", platform) @@ -242,18 +267,6 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*azureprovider.AzureMachineProviderSpec) } - var ( - machineV4CIDRs []net.IPNet - machineV6CIDRs []net.IPNet - ) - for _, network := range installConfig.Config.Networking.MachineNetwork { - if network.CIDR.IPNet.IP.To4() != nil { - machineV4CIDRs = append(machineV4CIDRs, network.CIDR.IPNet) - } else { - machineV6CIDRs = append(machineV6CIDRs, network.CIDR.IPNet) - } - } - preexistingnetwork := installConfig.Config.Azure.VirtualNetwork != "" data, err := azuretfvars.TFVars( azuretfvars.TFVarsSources{ @@ -264,8 +277,6 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { ImageURL: string(*rhcosImage), PreexistingNetwork: preexistingnetwork, Publish: installConfig.Config.Publish, - MachineV4CIDRs: machineV4CIDRs, - MachineV6CIDRs: machineV6CIDRs, }, ) if err != nil { @@ -336,6 +347,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { &installConfig.Config.Networking.MachineNetwork[0].CIDR.IPNet, installConfig.Config.Platform.Libvirt.Network.IfName, masterCount, + installConfig.Config.ControlPlane.Architecture, ) if err != nil { return errors.Wrapf(err, "failed to get %s Terraform 
variables", platform) @@ -363,33 +375,27 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { if err != nil { return err } - apiVIP, err := openstackdefaults.APIVIP(installConfig.Config.Networking) - if err != nil { - return err - } dnsVIP, err := openstackdefaults.DNSVIP(installConfig.Config.Networking) if err != nil { return err } - ingressVIP, err := openstackdefaults.IngressVIP(installConfig.Config.Networking) - if err != nil { - return err - } data, err = openstacktfvars.TFVars( masters[0].Spec.ProviderSpec.Value.Object.(*openstackprovider.OpenstackProviderSpec), installConfig.Config.Platform.OpenStack.Cloud, installConfig.Config.Platform.OpenStack.ExternalNetwork, installConfig.Config.Platform.OpenStack.ExternalDNS, installConfig.Config.Platform.OpenStack.LbFloatingIP, - apiVIP.String(), + installConfig.Config.Platform.OpenStack.APIVIP, dnsVIP.String(), - ingressVIP.String(), + installConfig.Config.Platform.OpenStack.IngressVIP, installConfig.Config.Platform.OpenStack.TrunkSupport, installConfig.Config.Platform.OpenStack.OctaviaSupport, string(*rhcosImage), clusterID.InfraID, caCert, bootstrapIgn, + installConfig.Config.ControlPlane.Platform.OpenStack, + installConfig.Config.Platform.OpenStack.MachinesSubnet, ) if err != nil { return errors.Wrapf(err, "failed to get %s Terraform variables", platform) @@ -399,6 +405,11 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { Data: data, }) case baremetal.Name: + ignitionURL := &url.URL{ + Scheme: "https", + Host: net.JoinHostPort(installConfig.Config.Platform.BareMetal.APIVIP, "22623"), + Path: "config/master", + } data, err = baremetaltfvars.TFVars( installConfig.Config.Platform.BareMetal.LibvirtURI, installConfig.Config.Platform.BareMetal.BootstrapProvisioningIP, @@ -407,6 +418,8 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { installConfig.Config.Platform.BareMetal.ProvisioningBridge, installConfig.Config.Platform.BareMetal.Hosts, 
string(*rhcosImage), + ignitionURL.String(), + base64.StdEncoding.EncodeToString(rootCA.Cert()), ) if err != nil { return errors.Wrapf(err, "failed to get %s Terraform variables", platform) diff --git a/pkg/asset/ignition/bootstrap/bootstrap.go b/pkg/asset/ignition/bootstrap/bootstrap.go index aac77623e32..b36d888e328 100644 --- a/pkg/asset/ignition/bootstrap/bootstrap.go +++ b/pkg/asset/ignition/bootstrap/bootstrap.go @@ -90,6 +90,7 @@ func (a *Bootstrap) Dependencies() []asset.Asset { &tls.AggregatorClientCertKey{}, &tls.AggregatorSignerCertKey{}, &tls.APIServerProxyCertKey{}, + &tls.BootstrapSSHKeyPair{}, &tls.EtcdCABundle{}, &tls.EtcdMetricCABundle{}, &tls.EtcdMetricSignerCertKey{}, @@ -135,7 +136,8 @@ func (a *Bootstrap) Generate(dependencies asset.Parents) error { proxy := &manifests.Proxy{} releaseImage := &releaseimage.Image{} rhcosImage := new(rhcos.Image) - dependencies.Get(installConfig, proxy, releaseImage, rhcosImage) + bootstrapSSHKeyPair := &tls.BootstrapSSHKeyPair{} + dependencies.Get(installConfig, proxy, releaseImage, rhcosImage, bootstrapSSHKeyPair) templateData, err := a.getTemplateData(installConfig.Config, releaseImage.PullSpec, installConfig.Config.ImageContentSources, proxy.Config, rhcosImage) @@ -184,7 +186,10 @@ func (a *Bootstrap) Generate(dependencies asset.Parents) error { a.Config.Passwd.Users = append( a.Config.Passwd.Users, - igntypes.PasswdUser{Name: "core", SSHAuthorizedKeys: []igntypes.SSHAuthorizedKey{igntypes.SSHAuthorizedKey(installConfig.Config.SSHKey)}}, + igntypes.PasswdUser{Name: "core", SSHAuthorizedKeys: []igntypes.SSHAuthorizedKey{ + igntypes.SSHAuthorizedKey(installConfig.Config.SSHKey), + igntypes.SSHAuthorizedKey(string(bootstrapSSHKeyPair.Public())), + }}, ) data, err := json.Marshal(a.Config) diff --git a/pkg/asset/ignition/machine/node.go b/pkg/asset/ignition/machine/node.go index 0b4be653491..8f500bc7e94 100644 --- a/pkg/asset/ignition/machine/node.go +++ b/pkg/asset/ignition/machine/node.go @@ -11,7 +11,6 @@ 
import ( "github.com/openshift/installer/pkg/types" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" openstacktypes "github.com/openshift/installer/pkg/types/openstack" - openstackdefaults "github.com/openshift/installer/pkg/types/openstack/defaults" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -29,10 +28,7 @@ func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, ro // way to configure DNS before Ignition runs. ignitionHost = net.JoinHostPort(installConfig.BareMetal.APIVIP, "22623") case openstacktypes.Name: - apiVIP, err := openstackdefaults.APIVIP(installConfig.Networking) - if err == nil { - ignitionHost = net.JoinHostPort(apiVIP.String(), "22623") - } + ignitionHost = net.JoinHostPort(installConfig.OpenStack.APIVIP, "22623") case ovirttypes.Name: ignitionHost = net.JoinHostPort(installConfig.Ovirt.APIVIP, "22623") case vspheretypes.Name: diff --git a/pkg/asset/installconfig/aws/basedomain.go b/pkg/asset/installconfig/aws/basedomain.go index d0f764c7eaa..fb30f9803d9 100644 --- a/pkg/asset/installconfig/aws/basedomain.go +++ b/pkg/asset/installconfig/aws/basedomain.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/route53" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -75,7 +76,7 @@ func GetBaseDomain() (string, error) { } // GetPublicZone returns a public route53 zone that matches the name. 
-func GetPublicZone(name string) (*route53.HostedZone, error) { +func GetPublicZone(sess *session.Session, name string) (*route53.HostedZone, error) { var res *route53.HostedZone f := func(resp *route53.ListHostedZonesOutput, lastPage bool) (shouldContinue bool) { for idx, zone := range resp.HostedZones { @@ -87,11 +88,7 @@ func GetPublicZone(name string) (*route53.HostedZone, error) { return !lastPage } - session, err := GetSession() - if err != nil { - return nil, errors.Wrap(err, "getting AWS session") - } - client := route53.New(session) + client := route53.New(sess) if err := client.ListHostedZonesPages(&route53.ListHostedZonesInput{}, f); err != nil { return nil, errors.Wrap(err, "listing hosted zones") } diff --git a/pkg/asset/installconfig/aws/metadata.go b/pkg/asset/installconfig/aws/metadata.go index 39b4b0abffb..47a736c7a11 100644 --- a/pkg/asset/installconfig/aws/metadata.go +++ b/pkg/asset/installconfig/aws/metadata.go @@ -6,6 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/pkg/errors" + + typesaws "github.com/openshift/installer/pkg/types/aws" ) // Metadata holds additional metadata for InstallConfig resources that @@ -16,15 +18,18 @@ type Metadata struct { availabilityZones []string privateSubnets map[string]Subnet publicSubnets map[string]Subnet - Region string `json:"region,omitempty"` - Subnets []string `json:"subnets,omitempty"` vpc string - mutex sync.Mutex + + Region string `json:"region,omitempty"` + Subnets []string `json:"subnets,omitempty"` + Services []typesaws.ServiceEndpoint `json:"services,omitempty"` + + mutex sync.Mutex } // NewMetadata initializes a new Metadata object. 
-func NewMetadata(region string, subnets []string) *Metadata { - return &Metadata{Region: region, Subnets: subnets} +func NewMetadata(region string, subnets []string, services []typesaws.ServiceEndpoint) *Metadata { + return &Metadata{Region: region, Subnets: subnets, Services: services} } // Session holds an AWS session which can be used for AWS API calls @@ -39,7 +44,7 @@ func (m *Metadata) Session(ctx context.Context) (*session.Session, error) { func (m *Metadata) unlockedSession(ctx context.Context) (*session.Session, error) { if m.session == nil { var err error - m.session, err = GetSession() + m.session, err = GetSessionWithOptions(WithRegion(m.Region), WithServiceEndpoints(m.Region, m.Services)) if err != nil { return nil, errors.Wrap(err, "creating AWS session") } diff --git a/pkg/asset/installconfig/aws/permissions.go b/pkg/asset/installconfig/aws/permissions.go index 851d7455b12..d731517b565 100644 --- a/pkg/asset/installconfig/aws/permissions.go +++ b/pkg/asset/installconfig/aws/permissions.go @@ -69,6 +69,7 @@ var permissions = map[PermissionGroup][]string{ "ec2:DescribeVpcClassicLinkDnsSupport", "ec2:DescribeVpcEndpoints", "ec2:DescribeVpcs", + "ec2:GetEbsDefaultKmsKeyId", "ec2:ModifyInstanceAttribute", "ec2:ModifyNetworkInterfaceAttribute", "ec2:ReleaseAddress", @@ -140,6 +141,7 @@ var permissions = map[PermissionGroup][]string{ "s3:CreateBucket", "s3:DeleteBucket", "s3:GetAccelerateConfiguration", + "s3:GetBucketAcl", "s3:GetBucketCors", "s3:GetBucketLocation", "s3:GetBucketLogging", @@ -180,6 +182,7 @@ var permissions = map[PermissionGroup][]string{ "iam:ListRolePolicies", "iam:ListUserPolicies", "s3:DeleteObject", + "s3:ListBucketVersions", "tag:GetResources", }, // Permissions required for creating network resources diff --git a/pkg/asset/installconfig/aws/platform.go b/pkg/asset/installconfig/aws/platform.go index adde4835137..78ffa8f3114 100644 --- a/pkg/asset/installconfig/aws/platform.go +++ b/pkg/asset/installconfig/aws/platform.go @@ -5,18 
+5,19 @@ import ( "sort" "strings" - "github.com/openshift/installer/pkg/types/aws" - "github.com/openshift/installer/pkg/types/aws/validation" "github.com/pkg/errors" "github.com/sirupsen/logrus" survey "gopkg.in/AlecAivazis/survey.v1" + + "github.com/openshift/installer/pkg/types/aws" ) // Platform collects AWS-specific configuration. func Platform() (*aws.Platform, error) { - longRegions := make([]string, 0, len(validation.Regions)) - shortRegions := make([]string, 0, len(validation.Regions)) - for id, location := range validation.Regions { + regions := knownRegions() + longRegions := make([]string, 0, len(regions)) + shortRegions := make([]string, 0, len(regions)) + for id, location := range regions { longRegions = append(longRegions, fmt.Sprintf("%s (%s)", id, location)) shortRegions = append(shortRegions, id) } @@ -25,8 +26,7 @@ func Platform() (*aws.Platform, error) { }) defaultRegion := "us-east-1" - _, ok := validation.Regions[defaultRegion] - if !ok { + if !IsKnownRegion(defaultRegion) { panic(fmt.Sprintf("installer bug: invalid default AWS region %q", defaultRegion)) } @@ -37,8 +37,7 @@ func Platform() (*aws.Platform, error) { defaultRegionPointer := ssn.Config.Region if defaultRegionPointer != nil && *defaultRegionPointer != "" { - _, ok := validation.Regions[*defaultRegionPointer] - if ok { + if IsKnownRegion(*defaultRegionPointer) { defaultRegion = *defaultRegionPointer } else { logrus.Warnf("Unrecognized AWS region %q, defaulting to %s", *defaultRegionPointer, defaultRegion) @@ -54,7 +53,7 @@ func Platform() (*aws.Platform, error) { Prompt: &survey.Select{ Message: "Region", Help: "The AWS region to be used for installation.", - Default: fmt.Sprintf("%s (%s)", defaultRegion, validation.Regions[defaultRegion]), + Default: fmt.Sprintf("%s (%s)", defaultRegion, regions[defaultRegion]), Options: longRegions, }, Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { diff --git a/pkg/asset/installconfig/aws/regions.go 
b/pkg/asset/installconfig/aws/regions.go new file mode 100644 index 00000000000..b9a866bc8dd --- /dev/null +++ b/pkg/asset/installconfig/aws/regions.go @@ -0,0 +1,35 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws/endpoints" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/openshift/installer/pkg/rhcos" +) + +// knownRegions is a list of AWS regions that the installer recognizes. +// This is subset of AWS regions and the regions where RHEL CoreOS images are published. +// The result is a map of region identifier to region description +func knownRegions() map[string]string { + required := sets.NewString(rhcos.AMIRegions...) + + regions := make(map[string]string) + for _, partition := range endpoints.DefaultPartitions() { + for _, partitionRegion := range partition.Regions() { + partitionRegion := partitionRegion + if required.Has(partitionRegion.ID()) { + regions[partitionRegion.ID()] = partitionRegion.Description() + } + } + } + return regions +} + +// IsKnownRegion return true is a specified region is Known to the installer. +// A known region is subset of AWS regions and the regions where RHEL CoreOS images are published. 
+func IsKnownRegion(region string) bool { + if _, ok := knownRegions()[region]; ok { + return true + } + return false +} diff --git a/pkg/asset/installconfig/aws/session.go b/pkg/asset/installconfig/aws/session.go index e695e35d104..aa7d17e37ce 100644 --- a/pkg/asset/installconfig/aws/session.go +++ b/pkg/asset/installconfig/aws/session.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/pkg/errors" @@ -15,6 +16,7 @@ import ( survey "gopkg.in/AlecAivazis/survey.v1" ini "gopkg.in/ini.v1" + typesaws "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/version" ) @@ -30,12 +32,41 @@ var ( } ) +// SessionOptions is a function that modifies the provided session.Option. +type SessionOptions func(sess *session.Options) + +// WithRegion configures the session.Option to set the AWS region. +func WithRegion(region string) SessionOptions { + return func(sess *session.Options) { + cfg := aws.NewConfig().WithRegion(region) + sess.Config.MergeIn(cfg) + } +} + +// WithServiceEndpoints configures the session.Option to use provides services for AWS endpoints. 
+func WithServiceEndpoints(region string, services []typesaws.ServiceEndpoint) SessionOptions { + return func(sess *session.Options) { + resolver := newAWSResolver(region, services) + cfg := aws.NewConfig().WithEndpointResolver(resolver) + sess.Config.MergeIn(cfg) + } +} + // GetSession returns an AWS session by checking credentials // and, if no creds are found, asks for them and stores them on disk in a config file -func GetSession() (*session.Session, error) { - ssn := session.Must(session.NewSessionWithOptions(session.Options{ +func GetSession() (*session.Session, error) { return GetSessionWithOptions() } + +// GetSessionWithOptions returns an AWS session by checking credentials +// and, if no creds are found, asks for them and stores them on disk in a config file +func GetSessionWithOptions(optFuncs ...SessionOptions) (*session.Session, error) { + options := session.Options{ SharedConfigState: session.SharedConfigEnable, - })) + } + for _, optFunc := range optFuncs { + optFunc(&options) + } + + ssn := session.Must(session.NewSessionWithOptions(options)) sharedCredentialsProvider := &credentials.SharedCredentialsProvider{} ssn.Config.Credentials = credentials.NewChainCredentials([]credentials.Provider{ @@ -140,3 +171,68 @@ func getCredentials() error { return os.Rename(tempPath, path) } + +type awsResolver struct { + region string + services map[string]typesaws.ServiceEndpoint + + // this is a list of known default endpoints for specific regions that would + // otherwise require user to set the service overrides. + // it's a map of region => service => resolved endpoint + // this is only used when the user hasn't specified a override for the service in that region. 
+ defaultEndpoints map[string]map[string]endpoints.ResolvedEndpoint +} + +func newAWSResolver(region string, services []typesaws.ServiceEndpoint) *awsResolver { + resolver := &awsResolver{ + region: region, + services: make(map[string]typesaws.ServiceEndpoint), + defaultEndpoints: defaultEndpoints(), + } + for _, service := range services { + service := service + resolver.services[resolverKey(service.Name)] = service + } + return resolver +} + +func (ar *awsResolver) EndpointFor(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { + if s, ok := ar.services[resolverKey(service)]; ok { + logrus.Debugf("resolved AWS service %s (%s) to %q", service, region, s.URL) + return endpoints.ResolvedEndpoint{ + URL: s.URL, + SigningRegion: ar.region, + }, nil + } + if rv, ok := ar.defaultEndpoints[region]; ok { + if v, ok := rv[service]; ok { + return v, nil + } + } + return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +} + +func resolverKey(service string) string { + return service +} + +// this is a list of known default endpoints for specific regions that would +// otherwise require user to set the service overrides. +// it's a map of region => service => resolved endpoint +// this is only used when the user hasn't specified a override for the service in that region. 
+func defaultEndpoints() map[string]map[string]endpoints.ResolvedEndpoint { + return map[string]map[string]endpoints.ResolvedEndpoint{ + endpoints.CnNorth1RegionID: { + "route53": { + URL: "https://route53.amazonaws.com.cn", + SigningRegion: endpoints.CnNorthwest1RegionID, + }, + }, + endpoints.CnNorthwest1RegionID: { + "route53": { + URL: "https://route53.amazonaws.com.cn", + SigningRegion: endpoints.CnNorthwest1RegionID, + }, + }, + } +} diff --git a/pkg/asset/installconfig/aws/session_test.go b/pkg/asset/installconfig/aws/session_test.go new file mode 100644 index 00000000000..b6fa704179d --- /dev/null +++ b/pkg/asset/installconfig/aws/session_test.go @@ -0,0 +1,53 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + typesaws "github.com/openshift/installer/pkg/types/aws" +) + +func TestAWSResolver(t *testing.T) { + overrides := []typesaws.ServiceEndpoint{{ + Name: "ec2", + URL: "test-ec2.local", + }, { + Name: "s3", + URL: "https://test-s3.local", + }} + + cases := []struct { + iservice, iregion string + overrides []typesaws.ServiceEndpoint + expected string + }{{ + iservice: "ec2", + iregion: "us-east-1", + expected: "https://ec2.us-east-1.amazonaws.com", + }, { + iservice: "ec2", + iregion: "us-east-1", + overrides: overrides, + expected: "test-ec2.local", + }, { + iservice: "s3", + iregion: "us-east-1", + overrides: overrides, + expected: "https://test-s3.local", + }, { + iservice: "elasticloadbalancing", + iregion: "us-east-1", + overrides: overrides, + expected: "https://elasticloadbalancing.us-east-1.amazonaws.com", + }} + for idx, test := range cases { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + resolvers := newAWSResolver(test.iregion, test.overrides) + endpoint, err := resolvers.EndpointFor(test.iservice, test.iregion) + assert.NoError(t, err) + assert.Equal(t, test.expected, endpoint.URL) + }) + } +} diff --git a/pkg/asset/installconfig/aws/validation.go b/pkg/asset/installconfig/aws/validation.go 
index 7f3bdb3ad87..be8fc9d0a13 100644 --- a/pkg/asset/installconfig/aws/validation.go +++ b/pkg/asset/installconfig/aws/validation.go @@ -6,7 +6,9 @@ import ( "net" "sort" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/pkg/errors" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" @@ -37,9 +39,17 @@ func Validate(ctx context.Context, meta *Metadata, config *types.InstallConfig) func validatePlatform(ctx context.Context, meta *Metadata, fldPath *field.Path, platform *awstypes.Platform, networking *types.Networking, publish types.PublishingStrategy) field.ErrorList { allErrs := field.ErrorList{} + + if !isAWSSDKRegion(platform.Region) && platform.AMIID == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("amiID"), "AMI must be provided")) + } + if len(platform.Subnets) > 0 { allErrs = append(allErrs, validateSubnets(ctx, meta, fldPath.Child("subnets"), platform.Subnets, networking, publish)...) } + if err := validateServiceEndpoints(fldPath.Child("serviceEndpoints"), platform.Region, platform.ServiceEndpoints); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceEndpoints"), platform.ServiceEndpoints, err.Error())) + } if platform.DefaultMachinePlatform != nil { allErrs = append(allErrs, validateMachinePool(ctx, meta, fldPath.Child("defaultMachinePlatform"), platform, platform.DefaultMachinePlatform)...) 
} @@ -165,3 +175,40 @@ func validateDuplicateSubnetZones(fldPath *field.Path, subnets map[string]Subnet } return allErrs } + +func validateServiceEndpoints(fldPath *field.Path, region string, services []awstypes.ServiceEndpoint) error { + if isAWSSDKRegion(region) { + return nil + } + + resolver := newAWSResolver(region, services) + var errs []error + for _, service := range requiredServices { + _, err := resolver.EndpointFor(service, region, endpoints.StrictMatchingOption) + if err != nil { + errs = append(errs, errors.Wrapf(err, "failed to find endpoint for service %q", service)) + } + } + return utilerrors.NewAggregate(errs) +} + +func isAWSSDKRegion(region string) bool { + for _, partition := range endpoints.DefaultPartitions() { + for _, partitionRegion := range partition.Regions() { + if region == partitionRegion.ID() { + return true + } + } + } + return false +} + +var requiredServices = []string{ + "ec2", + "elasticloadbalancing", + "iam", + "route53", + "s3", + "sts", + "tagging", +} diff --git a/pkg/asset/installconfig/aws/validation_test.go b/pkg/asset/installconfig/aws/validation_test.go index 6613d5b7ea8..9dbf3960db8 100644 --- a/pkg/asset/installconfig/aws/validation_test.go +++ b/pkg/asset/installconfig/aws/validation_test.go @@ -25,6 +25,7 @@ func validInstallConfig() *types.InstallConfig { Publish: types.ExternalPublishingStrategy, Platform: types.Platform{ AWS: &aws.Platform{ + Region: "us-east-1", Subnets: []string{ "valid-private-subnet-a", "valid-private-subnet-b", @@ -90,6 +91,31 @@ func validPublicSubnets() map[string]Subnet { } } +func validServiceEndpoints() []aws.ServiceEndpoint { + return []aws.ServiceEndpoint{{ + Name: "ec2", + URL: "e2e.local", + }, { + Name: "s3", + URL: "e2e.local", + }, { + Name: "iam", + URL: "e2e.local", + }, { + Name: "elasticloadbalancing", + URL: "e2e.local", + }, { + Name: "tagging", + URL: "e2e.local", + }, { + Name: "route53", + URL: "e2e.local", + }, { + Name: "sts", + URL: "e2e.local", + }} +} + func 
TestValidate(t *testing.T) { tests := []struct { name string @@ -102,7 +128,7 @@ func TestValidate(t *testing.T) { name: "valid no byo", installConfig: func() *types.InstallConfig { c := validInstallConfig() - c.Platform.AWS = &aws.Platform{} + c.Platform.AWS = &aws.Platform{Region: "us-east-1"} return c }(), availZones: validAvailZones(), @@ -315,6 +341,55 @@ func TestValidate(t *testing.T) { privateSubnets: validPrivateSubnets(), publicSubnets: validPublicSubnets(), exptectErr: `^\[compute\[0\]\.platform\.aws\.zones: Invalid value: \[\]string{\"a\", \"b\", \"c\", \"d\"}: No subnets provided for zones \[d\], compute\[1\]\.platform\.aws\.zones: Invalid value: \[\]string{\"a\", \"b\", \"e\"}: No subnets provided for zones \[e\]\]$`, + }, { + name: "custom region invalid service endpoints none provided", + installConfig: func() *types.InstallConfig { + c := validInstallConfig() + c.Platform.AWS.Region = "test-region" + c.Platform.AWS.AMIID = "dummy-id" + return c + }(), + availZones: validAvailZones(), + privateSubnets: validPrivateSubnets(), + publicSubnets: validPublicSubnets(), + exptectErr: `^platform\.aws\.serviceEndpoints: Invalid value: (.|\n)*: \[failed to find endpoint for service "ec2": (.|\n)*, failed to find endpoint for service "elasticloadbalancing": (.|\n)*, failed to find endpoint for service "iam": (.|\n)*, failed to find endpoint for service "route53": (.|\n)*, failed to find endpoint for service "s3": (.|\n)*, failed to find endpoint for service "sts": (.|\n)*, failed to find endpoint for service "tagging": (.|\n)*\]$`, + }, { + name: "custom region invalid service endpoints some provided", + installConfig: func() *types.InstallConfig { + c := validInstallConfig() + c.Platform.AWS.Region = "test-region" + c.Platform.AWS.AMIID = "dummy-id" + c.Platform.AWS.ServiceEndpoints = validServiceEndpoints()[:3] + return c + }(), + availZones: validAvailZones(), + privateSubnets: validPrivateSubnets(), + publicSubnets: validPublicSubnets(), + exptectErr: 
`^platform\.aws\.serviceEndpoints: Invalid value: (.|\n)*: \[failed to find endpoint for service "elasticloadbalancing": (.|\n)*, failed to find endpoint for service "route53": (.|\n)*, failed to find endpoint for service "sts": (.|\n)*, failed to find endpoint for service "tagging": (.|\n)*$`, + }, { + name: "custom region valid service endpoints", + installConfig: func() *types.InstallConfig { + c := validInstallConfig() + c.Platform.AWS.Region = "test-region" + c.Platform.AWS.AMIID = "dummy-id" + c.Platform.AWS.ServiceEndpoints = validServiceEndpoints() + return c + }(), + availZones: validAvailZones(), + privateSubnets: validPrivateSubnets(), + publicSubnets: validPublicSubnets(), + }, { + name: "AMI not provided for unknown region", + installConfig: func() *types.InstallConfig { + c := validInstallConfig() + c.Platform.AWS.Region = "test-region" + c.Platform.AWS.ServiceEndpoints = validServiceEndpoints() + return c + }(), + availZones: validAvailZones(), + privateSubnets: validPrivateSubnets(), + publicSubnets: validPublicSubnets(), + exptectErr: `^platform\.aws\.amiID: Required value: AMI must be provided$`, }} for _, test := range tests { diff --git a/pkg/asset/installconfig/azure/azure.go b/pkg/asset/installconfig/azure/azure.go index 35c0b585cdd..5f49b212019 100644 --- a/pkg/asset/installconfig/azure/azure.go +++ b/pkg/asset/installconfig/azure/azure.go @@ -13,9 +13,6 @@ import ( "github.com/pkg/errors" survey "gopkg.in/AlecAivazis/survey.v1" - - azres "github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/resources" - azsub "github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/subscriptions" ) const ( @@ -88,39 +85,36 @@ func Platform() (*azure.Platform, error) { } func getRegions() (map[string]string, error) { - session, err := GetSession() + client, err := NewClient(context.TODO()) if err != nil { return nil, err } - client := azsub.NewClient() - client.Authorizer = session.Authorizer - ctx, cancel := 
context.WithTimeout(context.TODO(), 30*time.Second) + + ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Minute) defer cancel() - locations, err := client.ListLocations(ctx, session.Credentials.SubscriptionID) + + locations, err := client.ListLocations(ctx) if err != nil { return nil, err } - locationsValue := *locations.Value allLocations := map[string]string{} - for _, location := range locationsValue { + for _, location := range *locations { allLocations[to.String(location.Name)] = to.String(location.DisplayName) } return allLocations, nil } func getResourceCapableRegions() ([]string, error) { - session, err := GetSession() + client, err := NewClient(context.TODO()) if err != nil { return nil, err } - client := azres.NewProvidersClient(session.Credentials.SubscriptionID) - client.Authorizer = session.Authorizer - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Minute) defer cancel() - provider, err := client.Get(ctx, "Microsoft.Resources", "") + provider, err := client.GetResourcesProvider(ctx, "Microsoft.Resources") if err != nil { return nil, err } diff --git a/pkg/asset/installconfig/azure/client.go b/pkg/asset/installconfig/azure/client.go index a2742932e93..eeea6a226af 100644 --- a/pkg/asset/installconfig/azure/client.go +++ b/pkg/asset/installconfig/azure/client.go @@ -7,6 +7,8 @@ import ( "github.com/pkg/errors" aznetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network" + azres "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources" + azsubs "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions" ) //go:generate mockgen -source=./client.go -destination=mock/azureclient_generated.go -package=mock @@ -16,6 +18,8 @@ type API interface { GetVirtualNetwork(ctx context.Context, resourceGroupName, virtualNetwork string) (*aznetwork.VirtualNetwork, error) GetComputeSubnet(ctx context.Context, 
resourceGroupName, virtualNetwork, subnet string) (*aznetwork.Subnet, error) GetControlPlaneSubnet(ctx context.Context, resourceGroupName, virtualNetwork, subnet string) (*aznetwork.Subnet, error) + ListLocations(ctx context.Context) (*[]azsubs.Location, error) + GetResourcesProvider(ctx context.Context, resourceProviderNamespace string) (*azres.Provider, error) } // Client makes calls to the Azure API. @@ -98,3 +102,53 @@ func (c *Client) getSubnetsClient(ctx context.Context) (*aznetwork.SubnetsClient subnetClient.Authorizer = c.ssn.Authorizer return &subnetClient, nil } + +// ListLocations lists the Azure regions dir the given subscription +func (c *Client) ListLocations(ctx context.Context) (*[]azsubs.Location, error) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + subsClient, err := c.getSubscriptionsClient(ctx) + if err != nil { + return nil, err + } + + locations, err := subsClient.ListLocations(ctx, c.ssn.Credentials.SubscriptionID) + if err != nil { + return nil, errors.Wrapf(err, "failed to list locations") + } + + return locations.Value, nil +} + +// getSubscriptionsClient sets up a new client to retrieve subscription data +func (c *Client) getSubscriptionsClient(ctx context.Context) (azsubs.Client, error) { + client := azsubs.NewClient() + client.Authorizer = c.ssn.Authorizer + return client, nil +} + +// GetResourcesProvider gets the Azure resource provider +func (c *Client) GetResourcesProvider(ctx context.Context, resourceProviderNamespace string) (*azres.Provider, error) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + providersClient, err := c.getProvidersClient(ctx) + if err != nil { + return nil, err + } + + provider, err := providersClient.Get(ctx, resourceProviderNamespace, "") + if err != nil { + return nil, errors.Wrapf(err, "failed to get resource provider %s", resourceProviderNamespace) + } + + return &provider, nil +} + +// getProvidersClient sets up a new client to retrieve 
providers data +func (c *Client) getProvidersClient(ctx context.Context) (azres.ProvidersClient, error) { + client := azres.NewProvidersClient(c.ssn.Credentials.SubscriptionID) + client.Authorizer = c.ssn.Authorizer + return client, nil +} diff --git a/pkg/asset/installconfig/azure/mock/azureclient_generated.go b/pkg/asset/installconfig/azure/mock/azureclient_generated.go index 903621ebdb7..19244134f0b 100644 --- a/pkg/asset/installconfig/azure/mock/azureclient_generated.go +++ b/pkg/asset/installconfig/azure/mock/azureclient_generated.go @@ -7,34 +7,36 @@ package mock import ( context "context" network "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network" + resources "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources" + subscriptions "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions" gomock "github.com/golang/mock/gomock" reflect "reflect" ) -// MockAPI is a mock of API interface +// MockAPI is a mock of API interface. type MockAPI struct { ctrl *gomock.Controller recorder *MockAPIMockRecorder } -// MockAPIMockRecorder is the mock recorder for MockAPI +// MockAPIMockRecorder is the mock recorder for MockAPI. type MockAPIMockRecorder struct { mock *MockAPI } -// NewMockAPI creates a new mock instance +// NewMockAPI creates a new mock instance. func NewMockAPI(ctrl *gomock.Controller) *MockAPI { mock := &MockAPI{ctrl: ctrl} mock.recorder = &MockAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockAPI) EXPECT() *MockAPIMockRecorder { return m.recorder } -// GetVirtualNetwork mocks base method +// GetVirtualNetwork mocks base method. 
func (m *MockAPI) GetVirtualNetwork(ctx context.Context, resourceGroupName, virtualNetwork string) (*network.VirtualNetwork, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetVirtualNetwork", ctx, resourceGroupName, virtualNetwork) @@ -43,13 +45,13 @@ func (m *MockAPI) GetVirtualNetwork(ctx context.Context, resourceGroupName, virt return ret0, ret1 } -// GetVirtualNetwork indicates an expected call of GetVirtualNetwork +// GetVirtualNetwork indicates an expected call of GetVirtualNetwork. func (mr *MockAPIMockRecorder) GetVirtualNetwork(ctx, resourceGroupName, virtualNetwork interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualNetwork", reflect.TypeOf((*MockAPI)(nil).GetVirtualNetwork), ctx, resourceGroupName, virtualNetwork) } -// GetComputeSubnet mocks base method +// GetComputeSubnet mocks base method. func (m *MockAPI) GetComputeSubnet(ctx context.Context, resourceGroupName, virtualNetwork, subnet string) (*network.Subnet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetComputeSubnet", ctx, resourceGroupName, virtualNetwork, subnet) @@ -58,13 +60,13 @@ func (m *MockAPI) GetComputeSubnet(ctx context.Context, resourceGroupName, virtu return ret0, ret1 } -// GetComputeSubnet indicates an expected call of GetComputeSubnet +// GetComputeSubnet indicates an expected call of GetComputeSubnet. func (mr *MockAPIMockRecorder) GetComputeSubnet(ctx, resourceGroupName, virtualNetwork, subnet interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetComputeSubnet", reflect.TypeOf((*MockAPI)(nil).GetComputeSubnet), ctx, resourceGroupName, virtualNetwork, subnet) } -// GetControlPlaneSubnet mocks base method +// GetControlPlaneSubnet mocks base method. 
func (m *MockAPI) GetControlPlaneSubnet(ctx context.Context, resourceGroupName, virtualNetwork, subnet string) (*network.Subnet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetControlPlaneSubnet", ctx, resourceGroupName, virtualNetwork, subnet) @@ -73,8 +75,38 @@ func (m *MockAPI) GetControlPlaneSubnet(ctx context.Context, resourceGroupName, return ret0, ret1 } -// GetControlPlaneSubnet indicates an expected call of GetControlPlaneSubnet +// GetControlPlaneSubnet indicates an expected call of GetControlPlaneSubnet. func (mr *MockAPIMockRecorder) GetControlPlaneSubnet(ctx, resourceGroupName, virtualNetwork, subnet interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetControlPlaneSubnet", reflect.TypeOf((*MockAPI)(nil).GetControlPlaneSubnet), ctx, resourceGroupName, virtualNetwork, subnet) } + +// ListLocations mocks base method. +func (m *MockAPI) ListLocations(ctx context.Context) (*[]subscriptions.Location, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListLocations", ctx) + ret0, _ := ret[0].(*[]subscriptions.Location) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListLocations indicates an expected call of ListLocations. +func (mr *MockAPIMockRecorder) ListLocations(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLocations", reflect.TypeOf((*MockAPI)(nil).ListLocations), ctx) +} + +// GetResourcesProvider mocks base method. +func (m *MockAPI) GetResourcesProvider(ctx context.Context, resourceProviderNamespace string) (*resources.Provider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResourcesProvider", ctx, resourceProviderNamespace) + ret0, _ := ret[0].(*resources.Provider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetResourcesProvider indicates an expected call of GetResourcesProvider. 
+func (mr *MockAPIMockRecorder) GetResourcesProvider(ctx, resourceProviderNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcesProvider", reflect.TypeOf((*MockAPI)(nil).GetResourcesProvider), ctx, resourceProviderNamespace) +} diff --git a/pkg/asset/installconfig/azure/validation.go b/pkg/asset/installconfig/azure/validation.go index fe8b94cac37..d9c3cb02224 100644 --- a/pkg/asset/installconfig/azure/validation.go +++ b/pkg/asset/installconfig/azure/validation.go @@ -4,8 +4,10 @@ import ( "context" "fmt" "net" + "strings" aznetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network" + "github.com/Azure/go-autorest/autorest/to" aztypes "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types" @@ -17,6 +19,7 @@ func Validate(client API, ic *types.InstallConfig) error { allErrs := field.ErrorList{} allErrs = append(allErrs, validateNetworks(client, ic.Azure, ic.Networking.MachineNetwork, field.NewPath("platform").Child("azure"))...) + allErrs = append(allErrs, validateRegion(client, field.NewPath("platform").Child("azure").Child("region"), ic.Azure)...) 
return allErrs.ToAggregate() } @@ -69,3 +72,47 @@ func validateMachineNetworksContainIP(fldPath *field.Path, networks []types.Mach } return field.ErrorList{field.Invalid(fldPath, subnetName, fmt.Sprintf("subnet %s address prefix is outside of the specified machine networks", ip))} } + +// validateRegion checks that the desired region is valid and available to the user +func validateRegion(client API, fieldPath *field.Path, p *aztypes.Platform) field.ErrorList { + locations, err := client.ListLocations(context.TODO()) + if err != nil { + return field.ErrorList{field.Invalid(fieldPath, p.Region, "failed to retrieve available regions")} + } + + availableRegions := map[string]string{} + for _, location := range *locations { + availableRegions[to.String(location.Name)] = to.String(location.DisplayName) + } + + displayName, ok := availableRegions[p.Region] + + if !ok { + errMsg := fmt.Sprintf("region %q is not valid or not available for this account", p.Region) + + normalizedRegion := strings.Replace(strings.ToLower(p.Region), " ", "", -1) + if _, ok := availableRegions[normalizedRegion]; ok { + errMsg += fmt.Sprintf(", did you mean %q?", normalizedRegion) + } + + return field.ErrorList{field.Invalid(fieldPath, p.Region, errMsg)} + + } + + provider, err := client.GetResourcesProvider(context.TODO(), "Microsoft.Resources") + if err != nil { + return field.ErrorList{field.Invalid(fieldPath, p.Region, "failed to retrieve resource capable regions")} + } + + for _, resType := range *provider.ResourceTypes { + if *resType.ResourceType == "resourceGroups" { + for _, resourceCapableRegion := range *resType.Locations { + if resourceCapableRegion == displayName { + return field.ErrorList{} + } + } + } + } + + return field.ErrorList{field.Invalid(fieldPath, p.Region, fmt.Sprintf("region %q does not support resource creation", p.Region))} +} diff --git a/pkg/asset/installconfig/azure/validation_test.go b/pkg/asset/installconfig/azure/validation_test.go index 7527b934ed8..6bbf10926cf 
100644 --- a/pkg/asset/installconfig/azure/validation_test.go +++ b/pkg/asset/installconfig/azure/validation_test.go @@ -6,6 +6,8 @@ import ( "testing" aznetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network" + azres "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources" + azsubs "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-06-01/subscriptions" "github.com/golang/mock/gomock" "github.com/openshift/installer/pkg/asset/installconfig/azure/mock" "github.com/openshift/installer/pkg/ipnet" @@ -17,14 +19,18 @@ import ( type editFunctions []func(ic *types.InstallConfig) var ( - validVirtualNetwork = "valid-virtual-network" - validNetworkResourceGroup = "valid-network-resource-group" - validRegion = "centralus" - validComputeSubnet = "valid-compute-subnet" - validControlPlaneSubnet = "valid-controlplane-subnet" - validCIDR = "10.0.0.0/16" - validComputeSubnetCIDR = "10.0.0.0/24" - validControlPlaneSubnetCIDR = "10.0.32.0/24" + validVirtualNetwork = "valid-virtual-network" + validNetworkResourceGroup = "valid-network-resource-group" + validRegion = "centralus" + validRegionsList = []string{"centralus", "westus", "australiacentral2"} + resourcesCapableRegionsList = []string{"centralus", "westus"} + validComputeSubnet = "valid-compute-subnet" + validControlPlaneSubnet = "valid-controlplane-subnet" + validCIDR = "10.0.0.0/16" + validComputeSubnetCIDR = "10.0.0.0/24" + validControlPlaneSubnetCIDR = "10.0.32.0/24" + validResourceGroupNamespace = "Microsoft.Resources" + validResourceGroupResourceType = "resourceGroups" invalidateMachineCIDR = func(ic *types.InstallConfig) { _, newCidr, _ := net.ParseCIDR("192.168.111.0/24") @@ -39,7 +45,9 @@ var ( invalidateVirtualNetwork = func(ic *types.InstallConfig) { ic.Azure.VirtualNetwork = "invalid-virtual-network" } invalidateComputeSubnet = func(ic *types.InstallConfig) { ic.Azure.ComputeSubnet = "invalid-compute-subnet" } invalidateControlPlaneSubnet = func(ic 
*types.InstallConfig) { ic.Azure.ControlPlaneSubnet = "invalid-controlplane-subnet" } - invalidateRegion = func(ic *types.InstallConfig) { ic.Azure.Region = "eastus" } + invalidateRegion = func(ic *types.InstallConfig) { ic.Azure.Region = "neverland" } + invalidateRegionCapabilities = func(ic *types.InstallConfig) { ic.Azure.Region = "australiacentral2" } + invalidateRegionLetterCase = func(ic *types.InstallConfig) { ic.Azure.Region = "Central US" } removeVirtualNetwork = func(ic *types.InstallConfig) { ic.Azure.VirtualNetwork = "" } removeSubnets = func(ic *types.InstallConfig) { ic.Azure.ComputeSubnet, ic.Azure.ControlPlaneSubnet = "", "" } @@ -58,6 +66,25 @@ var ( AddressPrefix: &validControlPlaneSubnetCIDR, }, } + locationsAPIResult = func() *[]azsubs.Location { + r := []azsubs.Location{} + for i := 0; i < len(validRegionsList); i++ { + r = append(r, azsubs.Location{ + Name: &validRegionsList[i], + DisplayName: &validRegionsList[i], + }) + } + return &r + }() + resourcesProviderAPIResult = &azres.Provider{ + Namespace: &validResourceGroupNamespace, + ResourceTypes: &[]azres.ProviderResourceType{ + { + ResourceType: &validResourceGroupResourceType, + Locations: &resourcesCapableRegionsList, + }, + }, + } ) func validInstallConfig() *types.InstallConfig { @@ -120,6 +147,21 @@ func TestAzureInstallConfigValidation(t *testing.T) { edits: editFunctions{invalidateControlPlaneSubnet, invalidateComputeSubnet}, errorMsg: "failed to retrieve compute subnet", }, + { + name: "Invalid region", + edits: editFunctions{invalidateRegion}, + errorMsg: "region \"neverland\" is not valid or not available for this account$", + }, + { + name: "Invalid region uncapable", + edits: editFunctions{invalidateRegionCapabilities}, + errorMsg: "region \"australiacentral2\" does not support resource creation$", + }, + { + name: "Invalid region letter case", + edits: editFunctions{invalidateRegionLetterCase}, + errorMsg: "region \"Central US\" is not valid or not available for this account, 
did you mean \"centralus\"\\?$", + }, } mockCtrl := gomock.NewController(t) @@ -144,6 +186,12 @@ func TestAzureInstallConfigValidation(t *testing.T) { azureClient.EXPECT().GetControlPlaneSubnet(gomock.Any(), validNetworkResourceGroup, gomock.Not(validVirtualNetwork), validControlPlaneSubnet).Return(&aznetwork.Subnet{}, fmt.Errorf("invalid virtual network")).AnyTimes() azureClient.EXPECT().GetControlPlaneSubnet(gomock.Any(), validNetworkResourceGroup, validVirtualNetwork, gomock.Not(validControlPlaneSubnet)).Return(&aznetwork.Subnet{}, fmt.Errorf("invalid control plane subnet")).AnyTimes() + // Location + azureClient.EXPECT().ListLocations(gomock.Any()).Return(locationsAPIResult, nil).AnyTimes() + + // ResourceProvider + azureClient.EXPECT().GetResourcesProvider(gomock.Any(), validResourceGroupNamespace).Return(resourcesProviderAPIResult, nil).AnyTimes() + for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { editedInstallConfig := validInstallConfig() diff --git a/pkg/asset/installconfig/clustername.go b/pkg/asset/installconfig/clustername.go index da311a300a6..84a75c7881e 100644 --- a/pkg/asset/installconfig/clustername.go +++ b/pkg/asset/installconfig/clustername.go @@ -4,7 +4,7 @@ import ( survey "gopkg.in/AlecAivazis/survey.v1" "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/types/validation" + "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/validate" ) @@ -36,7 +36,9 @@ func (a *clusterName) Generate(parents asset.Parents) error { }) } validator = survey.ComposeValidators(validator, func(ans interface{}) error { - return validate.DomainName(validation.ClusterDomain(bd.BaseDomain, ans.(string)), false) + installConfig := &types.InstallConfig{BaseDomain: bd.BaseDomain} + installConfig.ObjectMeta.Name = ans.(string) + return validate.DomainName(installConfig.ClusterDomain(), false) }) return survey.Ask([]*survey.Question{ diff --git a/pkg/asset/installconfig/gcp/.mock/gcpclient_generated.go 
b/pkg/asset/installconfig/gcp/.mock/gcpclient_generated.go new file mode 100644 index 00000000000..452e381bf57 --- /dev/null +++ b/pkg/asset/installconfig/gcp/.mock/gcpclient_generated.go @@ -0,0 +1,111 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./client.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + compute "google.golang.org/api/compute/v1" + dns "google.golang.org/api/dns/v1" + reflect "reflect" +) + +// MockAPI is a mock of API interface. +type MockAPI struct { + ctrl *gomock.Controller + recorder *MockAPIMockRecorder +} + +// MockAPIMockRecorder is the mock recorder for MockAPI. +type MockAPIMockRecorder struct { + mock *MockAPI +} + +// NewMockAPI creates a new mock instance. +func NewMockAPI(ctrl *gomock.Controller) *MockAPI { + mock := &MockAPI{ctrl: ctrl} + mock.recorder = &MockAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAPI) EXPECT() *MockAPIMockRecorder { + return m.recorder +} + +// GetNetwork mocks base method. +func (m *MockAPI) GetNetwork(ctx context.Context, network, project string) (*compute.Network, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetwork", ctx, network, project) + ret0, _ := ret[0].(*compute.Network) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetwork indicates an expected call of GetNetwork. +func (mr *MockAPIMockRecorder) GetNetwork(ctx, network, project interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetwork", reflect.TypeOf((*MockAPI)(nil).GetNetwork), ctx, network, project) +} + +// GetPublicDomains mocks base method. 
+func (m *MockAPI) GetPublicDomains(ctx context.Context, project string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicDomains", ctx, project) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPublicDomains indicates an expected call of GetPublicDomains. +func (mr *MockAPIMockRecorder) GetPublicDomains(ctx, project interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicDomains", reflect.TypeOf((*MockAPI)(nil).GetPublicDomains), ctx, project) +} + +// GetPublicDNSZone mocks base method. +func (m *MockAPI) GetPublicDNSZone(ctx context.Context, baseDomain, project string) (*dns.ManagedZone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicDNSZone", ctx, baseDomain, project) + ret0, _ := ret[0].(*dns.ManagedZone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPublicDNSZone indicates an expected call of GetPublicDNSZone. +func (mr *MockAPIMockRecorder) GetPublicDNSZone(ctx, baseDomain, project interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicDNSZone", reflect.TypeOf((*MockAPI)(nil).GetPublicDNSZone), ctx, baseDomain, project) +} + +// GetSubnetworks mocks base method. +func (m *MockAPI) GetSubnetworks(ctx context.Context, network, project, region string) ([]*compute.Subnetwork, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetworks", ctx, network, project, region) + ret0, _ := ret[0].([]*compute.Subnetwork) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetworks indicates an expected call of GetSubnetworks. 
+func (mr *MockAPIMockRecorder) GetSubnetworks(ctx, network, project, region interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetworks", reflect.TypeOf((*MockAPI)(nil).GetSubnetworks), ctx, network, project, region) +} + +// GetListOfProjects mocks base method. +func (m *MockAPI) GetListOfProjects(ctx context.Context) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetListOfProjects", ctx) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetListOfProjects indicates an expected call of GetListOfProjects. +func (mr *MockAPIMockRecorder) GetListOfProjects(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetListOfProjects", reflect.TypeOf((*MockAPI)(nil).GetListOfProjects), ctx) +} diff --git a/pkg/asset/installconfig/gcp/client.go b/pkg/asset/installconfig/gcp/client.go index 3eaf5218f2b..dd19d5587f5 100644 --- a/pkg/asset/installconfig/gcp/client.go +++ b/pkg/asset/installconfig/gcp/client.go @@ -7,6 +7,7 @@ import ( "time" "github.com/pkg/errors" + "google.golang.org/api/cloudresourcemanager/v1" compute "google.golang.org/api/compute/v1" dns "google.golang.org/api/dns/v1" "google.golang.org/api/option" @@ -20,6 +21,7 @@ type API interface { GetPublicDomains(ctx context.Context, project string) ([]string, error) GetPublicDNSZone(ctx context.Context, baseDomain, project string) (*dns.ManagedZone, error) GetSubnetworks(ctx context.Context, network, project, region string) ([]*compute.Subnetwork, error) + GetProjects(ctx context.Context) (map[string]string, error) } // Client makes calls to the GCP API. 
@@ -151,3 +153,35 @@ func (c *Client) getDNSService(ctx context.Context) (*dns.Service, error) { } return svc, nil } + +// GetProjects gets the list of project names and ids associated with the current user in the form +// of a map whose keys are ids and values are names. +func (c *Client) GetProjects(ctx context.Context) (map[string]string, error) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) + defer cancel() + + svc, err := c.getCloudResourceService(ctx) + if err != nil { + return nil, err + } + + req := svc.Projects.List() + projects := make(map[string]string) + if err := req.Pages(ctx, func(page *cloudresourcemanager.ListProjectsResponse) error { + for _, project := range page.Projects { + projects[project.ProjectId] = project.Name + } + return nil + }); err != nil { + return nil, err + } + return projects, nil +} + +func (c *Client) getCloudResourceService(ctx context.Context) (*cloudresourcemanager.Service, error) { + svc, err := cloudresourcemanager.NewService(ctx, option.WithCredentials(c.ssn.Credentials)) + if err != nil { + return nil, errors.Wrap(err, "failed to create cloud resource service") + } + return svc, nil +} diff --git a/pkg/asset/installconfig/gcp/gcp.go b/pkg/asset/installconfig/gcp/gcp.go index 9cea09287e4..8f3dbcb718a 100644 --- a/pkg/asset/installconfig/gcp/gcp.go +++ b/pkg/asset/installconfig/gcp/gcp.go @@ -7,10 +7,11 @@ import ( "strings" "time" + "gopkg.in/AlecAivazis/survey.v1" + "github.com/openshift/installer/pkg/types/gcp" "github.com/openshift/installer/pkg/types/gcp/validation" "github.com/pkg/errors" - "gopkg.in/AlecAivazis/survey.v1" ) // Platform collects GCP-specific configuration. 
@@ -37,16 +38,43 @@ func selectProject(ctx context.Context) (string, error) { } defaultProject := ssn.Credentials.ProjectID + client := &Client{ + ssn: ssn, + } + + projects, err := client.GetProjects(ctx) + if err != nil { + return "", errors.Wrap(err, "failed to get projects") + } + + var options []string + ids := make(map[string]string) + + var defaultValue string + + for id, name := range projects { + option := fmt.Sprintf("%s (%s)", name, id) + ids[option] = id + if id == defaultProject { + defaultValue = option + } + options = append(options, option) + } + sort.Strings(options) + var selectedProject string err = survey.Ask([]*survey.Question{ { - Prompt: &survey.Input{ + Prompt: &survey.Select{ Message: "Project ID", Help: "The project id where the cluster will be provisioned. The default is taken from the provided service account.", - Default: defaultProject, + Default: defaultValue, + Options: options, }, }, }, &selectedProject) + + selectedProject = ids[selectedProject] return selectedProject, nil } diff --git a/pkg/asset/installconfig/gcp/mock/gcpclient_generated.go b/pkg/asset/installconfig/gcp/mock/gcpclient_generated.go index 35041058b1d..dce3baeb7e6 100644 --- a/pkg/asset/installconfig/gcp/mock/gcpclient_generated.go +++ b/pkg/asset/installconfig/gcp/mock/gcpclient_generated.go @@ -94,3 +94,18 @@ func (mr *MockAPIMockRecorder) GetSubnetworks(ctx, network, project, region inte mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetworks", reflect.TypeOf((*MockAPI)(nil).GetSubnetworks), ctx, network, project, region) } + +// GetProjects mocks base method +func (m *MockAPI) GetProjects(ctx context.Context) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProjects", ctx) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProjects indicates an expected call of GetProjects +func (mr *MockAPIMockRecorder) GetProjects(ctx interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProjects", reflect.TypeOf((*MockAPI)(nil).GetProjects), ctx) +} diff --git a/pkg/asset/installconfig/gcp/validation.go b/pkg/asset/installconfig/gcp/validation.go index 541abdfaa6d..25e3b8d22fe 100644 --- a/pkg/asset/installconfig/gcp/validation.go +++ b/pkg/asset/installconfig/gcp/validation.go @@ -15,11 +15,28 @@ import ( func Validate(client API, ic *types.InstallConfig) error { allErrs := field.ErrorList{} + allErrs = append(allErrs, validateProject(client, ic, field.NewPath("platform").Child("gcp"))...) allErrs = append(allErrs, validateNetworks(client, ic, field.NewPath("platform").Child("gcp"))...) return allErrs.ToAggregate() } +func validateProject(client API, ic *types.InstallConfig, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if ic.GCP.ProjectID != "" { + projects, err := client.GetProjects(context.TODO()) + if err != nil { + return append(allErrs, field.InternalError(fieldPath.Child("project"), err)) + } + if _, found := projects[ic.GCP.ProjectID]; !found { + return append(allErrs, field.Invalid(fieldPath.Child("project"), ic.GCP.ProjectID, "invalid project ID")) + } + } + + return allErrs +} + // validateNetworks checks that the user-provided VPC is in the project and the provided subnets are valid. 
func validateNetworks(client API, ic *types.InstallConfig, fieldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} diff --git a/pkg/asset/installconfig/gcp/validation_test.go b/pkg/asset/installconfig/gcp/validation_test.go index 53fe84d288e..0f1922c7b2d 100644 --- a/pkg/asset/installconfig/gcp/validation_test.go +++ b/pkg/asset/installconfig/gcp/validation_test.go @@ -138,11 +138,19 @@ func TestGCPInstallConfigValidation(t *testing.T) { expectedError: true, expectedErrMsg: "network: Invalid value", }, + { + name: "Invalid project ID", + edits: editFunctions{invalidateProject, removeSubnets, removeVPC}, + expectedError: true, + expectedErrMsg: "platform.gcp.project: Invalid value: \"invalid-project\": invalid project ID", + }, } mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() gcpClient := mock.NewMockAPI(mockCtrl) + // Should get the list of projects. + gcpClient.EXPECT().GetProjects(gomock.Any()).Return(map[string]string{"valid-project": "valid-project"}, nil).AnyTimes() // When passed the correct network & project, return an empty network, which should be enough to validate ok. 
gcpClient.EXPECT().GetNetwork(gomock.Any(), validNetworkName, validProjectName).Return(&compute.Network{}, nil).AnyTimes() diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go index b3049e005e3..76d7882bfb8 100644 --- a/pkg/asset/installconfig/installconfig.go +++ b/pkg/asset/installconfig/installconfig.go @@ -131,7 +131,7 @@ func (a *InstallConfig) finish(filename string) error { defaults.SetInstallConfigDefaults(a.Config) if a.Config.AWS != nil { - a.AWS = aws.NewMetadata(a.Config.Platform.AWS.Region, a.Config.Platform.AWS.Subnets) + a.AWS = aws.NewMetadata(a.Config.Platform.AWS.Region, a.Config.Platform.AWS.Subnets, a.Config.AWS.ServiceEndpoints) } if err := validation.ValidateInstallConfig(a.Config, icopenstack.NewValidValuesFetcher()).ToAggregate(); err != nil { diff --git a/pkg/asset/installconfig/openstack/realvalidvaluesfetcher.go b/pkg/asset/installconfig/openstack/realvalidvaluesfetcher.go index b40c494cb42..7e9916175f5 100644 --- a/pkg/asset/installconfig/openstack/realvalidvaluesfetcher.go +++ b/pkg/asset/installconfig/openstack/realvalidvaluesfetcher.go @@ -9,6 +9,7 @@ import ( netext "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" "github.com/gophercloud/utils/openstack/clientconfig" "github.com/openshift/installer/pkg/types/openstack/validation" @@ -191,3 +192,21 @@ func (f realValidValuesFetcher) GetFloatingIPNames(cloud string, floatingNetwork return floatingIPNames, nil } + +func (f realValidValuesFetcher) GetSubnetCIDR(cloud string, subnetID string) (string, error) { + opts := &clientconfig.ClientOpts{ + Cloud: cloud, + } + + networkClient, err := clientconfig.NewServiceClient("network", opts) + if err != nil { + return "", err + } + + subnet, err := 
subnets.Get(networkClient, subnetID).Extract() + if err != nil { + return "", err + } + + return subnet.CIDR, nil +} diff --git a/pkg/asset/installconfig/ovirt/cluster.go b/pkg/asset/installconfig/ovirt/cluster.go index f889a3feac0..f7cc2735352 100644 --- a/pkg/asset/installconfig/ovirt/cluster.go +++ b/pkg/asset/installconfig/ovirt/cluster.go @@ -29,8 +29,8 @@ func askCluster(c *ovirtsdk4.Connection, p *ovirt.Platform) (string, error) { clusterNames = append(clusterNames, cluster.MustName()) } err = survey.AskOne(&survey.Select{ - Message: "Select the oVirt cluster", - Help: "The oVirt cluster where the VMs will be created", + Message: "oVirt cluster", + Help: "The oVirt cluster where the VMs will be created.", Options: clusterNames, }, &clusterName, diff --git a/pkg/asset/installconfig/ovirt/config.go b/pkg/asset/installconfig/ovirt/config.go index afd02bbe734..656fbbf851d 100644 --- a/pkg/asset/installconfig/ovirt/config.go +++ b/pkg/asset/installconfig/ovirt/config.go @@ -68,9 +68,9 @@ func (c *Config) Save() error { } path := discoverPath() - err = os.MkdirAll(filepath.Dir(path), os.FileMode(700)) + err = os.MkdirAll(filepath.Dir(path), 0700) if err != nil { return err } - return ioutil.WriteFile(path, out, os.FileMode(0600)) + return ioutil.WriteFile(path, out, 0600) } diff --git a/pkg/asset/installconfig/ovirt/credentials.go b/pkg/asset/installconfig/ovirt/credentials.go index 1d8427fdc79..3afecceac52 100644 --- a/pkg/asset/installconfig/ovirt/credentials.go +++ b/pkg/asset/installconfig/ovirt/credentials.go @@ -4,6 +4,7 @@ import ( "fmt" "net/url" + "github.com/sirupsen/logrus" "gopkg.in/AlecAivazis/survey.v1" ) @@ -12,8 +13,8 @@ func askCredentials() (Config, error) { err := survey.Ask([]*survey.Question{ { Prompt: &survey.Input{ - Message: "Enter oVirt's api endpoint URL", - Help: "oVirt engine api url, for example https://ovirt-engine-fqdn/ovirt-engine/api", + Message: "oVirt API endpoint URL", + Help: "The URL of the oVirt engine API. 
For example, https://ovirt-engine-fqdn/ovirt-engine/api.", }, Validate: survey.ComposeValidators(survey.Required), }, @@ -25,9 +26,9 @@ func askCredentials() (Config, error) { var ovirtCertTrusted bool err = survey.AskOne( &survey.Confirm{ - Message: "Is the installed oVirt certificate trusted?", + Message: "Is the oVirt CA trusted locally?", Default: true, - Help: "", + Help: "In order to securly communicate with the oVirt engine, the certificate authority must be trusted by the local system.", }, &ovirtCertTrusted, nil) @@ -48,21 +49,23 @@ func askCredentials() (Config, error) { ovirtURL.Host) err = survey.AskOne(&survey.Multiline{ - Message: "Enter oVirt's CA bundle", - Help: "Obtain oVirt CA bundle from " + pemURL, + Message: "oVirt certificate bundle", + Help: fmt.Sprintf("The oVirt certificate bundle can be downloaded from %s.", pemURL), }, &c.CABundle, survey.ComposeValidators(survey.Required)) if err != nil { return c, err } + } else { + logrus.Warning("Communication with the oVirt engine will be insecure.") } err = survey.Ask([]*survey.Question{ { Prompt: &survey.Input{ - Message: "Enter ovirt-engine username", - Help: "The user must have permissions to create VMs and disks on the Storage Domain with the same name as the OpenShift cluster", + Message: "oVirt engine username", + Help: "The user must have permissions to create VMs and disks on the Storage Domain with the same name as the OpenShift cluster.", Default: "admin@internal", }, Validate: survey.ComposeValidators(survey.Required), @@ -75,7 +78,7 @@ func askCredentials() (Config, error) { err = survey.Ask([]*survey.Question{ { Prompt: &survey.Password{ - Message: "Enter password", + Message: "oVirt engine password", Help: "", }, Validate: survey.ComposeValidators(survey.Required, authenticated(&c)), diff --git a/pkg/asset/installconfig/ovirt/network.go b/pkg/asset/installconfig/ovirt/network.go index a05b02c6579..6690a3aa757 100644 --- a/pkg/asset/installconfig/ovirt/network.go +++ 
b/pkg/asset/installconfig/ovirt/network.go @@ -29,9 +29,8 @@ func askNetwork(c *ovirtsdk4.Connection, p *ovirt.Platform) error { networkNames = append(networkNames, network.MustName()) } err = survey.AskOne(&survey.Select{ - Message: "Select the oVirt network", - Help: "The oVirt network of the deployed VMs. 'ovirtmgmt' is the default network - it is recommended " + - "to work with a dedicated network per OpenShift cluster", + Message: "oVirt network", + Help: "The oVirt network of the deployed VMs. 'ovirtmgmt' is the default network. It is recommended to use a dedicated network for each OpenShift cluster.", Options: networkNames, }, &networkName, diff --git a/pkg/asset/installconfig/ovirt/ovirt.go b/pkg/asset/installconfig/ovirt/ovirt.go index f97054f366b..705d069df44 100644 --- a/pkg/asset/installconfig/ovirt/ovirt.go +++ b/pkg/asset/installconfig/ovirt/ovirt.go @@ -55,8 +55,8 @@ func Platform() (*ovirt.Platform, error) { err = survey.Ask([]*survey.Question{ { Prompt: &survey.Input{ - Message: "Enter the internal API Virtual IP", - Help: "Make sure the IP address is not in use", + Message: "Internal API virtual IP", + Help: "This is the virtual IP address that will be used to address the OpenShift control plane. Make sure the IP address is not in use.", Default: "", }, Validate: survey.ComposeValidators(survey.Required), @@ -69,8 +69,8 @@ func Platform() (*ovirt.Platform, error) { err = survey.Ask([]*survey.Question{ { Prompt: &survey.Input{ - Message: "Enter the internal DNS Virtual IP", - Help: "Make sure the IP address is not in use", + Message: "Internal DNS virtual IP", + Help: "This is the virtual IP address that will be used to address the DNS server internal to the cluster. 
Make sure the IP address is not in use.", Default: "", }, Validate: survey.ComposeValidators(survey.Required), @@ -83,8 +83,8 @@ func Platform() (*ovirt.Platform, error) { err = survey.Ask([]*survey.Question{ { Prompt: &survey.Input{ - Message: "Enter the ingress IP ", - Help: "Make sure the IP address is not in use", + Message: "Ingress virtual IP", + Help: "This is the virtual IP address that will be used to address the OpenShift ingress routers. Make sure the IP address is not in use.", Default: "", }, Validate: survey.ComposeValidators(survey.Required), diff --git a/pkg/asset/installconfig/ovirt/storage.go b/pkg/asset/installconfig/ovirt/storage.go index d05c1d9220b..614d4dd16d8 100644 --- a/pkg/asset/installconfig/ovirt/storage.go +++ b/pkg/asset/installconfig/ovirt/storage.go @@ -29,8 +29,8 @@ func askStorage(c *ovirtsdk4.Connection, p *ovirt.Platform, clusterName string) domainNames = append(domainNames, domain.MustName()) } err = survey.AskOne(&survey.Select{ - Message: "Select the oVirt storage domain", - Help: "The storage domain will be used to create the disks of all the cluster nodes", + Message: "oVirt storage domain", + Help: "The storage domain will be used to create the disks of all the cluster nodes.", Options: domainNames, }, &storageDomainName, diff --git a/pkg/asset/installconfig/vsphere/validation_test.go b/pkg/asset/installconfig/vsphere/validation_test.go index e6cd958f80c..87290ae74fb 100644 --- a/pkg/asset/installconfig/vsphere/validation_test.go +++ b/pkg/asset/installconfig/vsphere/validation_test.go @@ -33,7 +33,6 @@ func validIPIInstallConfig() *types.InstallConfig { VCenter: "valid_vcenter", APIVIP: "192.168.111.0", IngressVIP: "192.168.111.1", - DNSVIP: "192.168.111.2", }, }, } diff --git a/pkg/asset/installconfig/vsphere/vsphere.go b/pkg/asset/installconfig/vsphere/vsphere.go index a4d4a20ff5f..e885312a8db 100644 --- a/pkg/asset/installconfig/vsphere/vsphere.go +++ b/pkg/asset/installconfig/vsphere/vsphere.go @@ -2,10 +2,350 @@ 
package vsphere import ( + "context" + "fmt" + "sort" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/vapi/rest" + "github.com/vmware/govmomi/vim25" + "gopkg.in/AlecAivazis/survey.v1" + "github.com/openshift/installer/pkg/types/vsphere" + vspheretypes "github.com/openshift/installer/pkg/types/vsphere" + "github.com/openshift/installer/pkg/validate" ) +const root = "/..." +const distributedVirtualPortGroupType = "DistributedVirtualPortgroup" +const networkType = "Network" + +// vCenterClient contains the login info/creds and client for the vCenter. +// They are contained in a single struct to facilitate client creation +// serving as validation of the vCenter, username, and password fields. +type vCenterClient struct { + VCenter string + Username string + Password string + Client *vim25.Client + RestClient *rest.Client +} + +// networkNamer declares an interface for the object.Common.Name() function. +// This is needed because find.NetworkList() returns the interface object.NetworkReference. +// All of the types that implement object.NetworkReference (OpaqueNetwork, +// DistributedVirtualPortgroup, & DistributedVirtualSwitch) and perhaps all +// types in general embed object.Common. +type networkNamer interface { + Name() string +} + // Platform collects vSphere-specific configuration. 
func Platform() (*vsphere.Platform, error) { - return &vsphere.Platform{}, nil + vCenter, err := getClients() + if err != nil { + return nil, err + } + + finder := find.NewFinder(vCenter.Client) + ctx := context.TODO() + + dc, dcPath, err := getDataCenter(ctx, finder, vCenter.Client) + if err != nil { + return nil, err + } + + cluster, err := getCluster(ctx, dcPath, finder, vCenter.Client) + if err != nil { + return nil, err + } + + datastore, err := getDataStore(ctx, dcPath, finder, vCenter.Client) + if err != nil { + return nil, err + } + + network, err := getNetwork(ctx, dcPath, finder, vCenter.Client) + if err != nil { + return nil, err + } + + apiVIP, ingressVIP := getVIPs() + + platform := &vsphere.Platform{ + Datacenter: dc, + Cluster: cluster, + DefaultDatastore: datastore, + Network: network, + VCenter: vCenter.VCenter, + Username: vCenter.Username, + Password: vCenter.Password, + APIVIP: apiVIP, + IngressVIP: ingressVIP, + } + return platform, nil +} + +// getClients() surveys the user for username, password, & vcenter. +// Validation on the three fields is performed by creating a client. +// If creating the client fails, an error is returned. +func getClients() (*vCenterClient, error) { + var vcenter, username, password string + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Input{ + Message: "vCenter", + Help: "The hostname of the vCenter to be used for installation.", + }, + Validate: survey.Required, + }, + }, &vcenter) + + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Input{ + Message: "Username", + Help: "The username to login to the vCenter.", + }, + Validate: survey.Required, + }, + }, &username) + + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Password{ + Message: "Password", + Help: "The password to login to the vCenter.", + }, + Validate: survey.Required, + }, + }, &password) + + // There is a noticeable delay when creating the client, so let the user know what's going on. 
+ logrus.Infof("Connecting to vCenter %s", vcenter) + vim25Client, restClient, err := vspheretypes.CreateVSphereClients(context.TODO(), + vcenter, + username, + password) + + // Survey does not allow validation of groups of input + // so we perform our own validation. + if err != nil { + return nil, errors.Wrapf(err, "unable to connect to vCenter %s. Ensure provided information is correct and client certs have been added to system trust.", vcenter) + } + + return &vCenterClient{ + VCenter: vcenter, + Username: username, + Password: password, + Client: vim25Client, + RestClient: restClient, + }, nil +} + +// getDataCenter searches the root for all datacenters and, if there is more than one, lets the user select +// one to use for installation. Returns the name and path of the selected datacenter. The name is used +// to generate the install config and the path is used to determine the options for cluster, datastore and network. +func getDataCenter(ctx context.Context, finder *find.Finder, client *vim25.Client) (string, string, error) { + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + + dataCenters, err := finder.DatacenterList(ctx, root) + if err != nil { + return "", "", errors.Wrap(err, "unable to list datacenters") + } + + // API returns an error when no results, but let's leave this in to be defensive. 
+ if len(dataCenters) == 0 { + return "", "", errors.New("did not find any datacenters") + } + if len(dataCenters) == 1 { + logrus.Infof("Defaulting to only available datacenter: %s", dataCenters[0].Name()) + dc := dataCenters[0] + return dc.Name(), formatPath(dc.InventoryPath), nil + } + + dataCenterPaths := make(map[string]string) + var dataCenterChoices []string + for _, dc := range dataCenters { + dataCenterPaths[dc.Name()] = dc.InventoryPath + dataCenterChoices = append(dataCenterChoices, dc.Name()) + } + sort.Strings(dataCenterChoices) + + var selectedDataCenter string + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Select{ + Message: "Datacenter", + Options: dataCenterChoices, + Help: "The Datacenter to be used for installation.", + }, + Validate: survey.Required, + }, + }, &selectedDataCenter) + selectedDataCenterPath := formatPath(dataCenterPaths[selectedDataCenter]) + return selectedDataCenter, selectedDataCenterPath, nil +} + +func getCluster(ctx context.Context, path string, finder *find.Finder, client *vim25.Client) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + + clusters, err := finder.ClusterComputeResourceList(ctx, path) + if err != nil { + return "", errors.Wrap(err, "unable to list clusters") + } + + // API returns an error when no results, but let's leave this in to be defensive. 
+ if len(clusters) == 0 { + return "", errors.New("did not find any clusters") + } + if len(clusters) == 1 { + logrus.Infof("Defaulting to only available cluster: %s", clusters[0].Name()) + return clusters[0].Name(), nil + } + + var clusterChoices []string + for _, c := range clusters { + clusterChoices = append(clusterChoices, c.Name()) + } + sort.Strings(clusterChoices) + + var selectedcluster string + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Select{ + Message: "Cluster", + Options: clusterChoices, + Help: "The cluster to be used for installation.", + }, + Validate: survey.Required, + }, + }, &selectedcluster) + + return selectedcluster, nil +} + +func getDataStore(ctx context.Context, path string, finder *find.Finder, client *vim25.Client) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + + dataStores, err := finder.DatastoreList(ctx, path) + if err != nil { + return "", errors.Wrap(err, "unable to list datastores") + } + + // API returns an error when no results, but let's leave this in to be defensive. 
+ if len(dataStores) == 0 { + return "", errors.New("did not find any datastores") + } + if len(dataStores) == 1 { + logrus.Infof("Defaulting to only available datastore: %s", dataStores[0].Name()) + return dataStores[0].Name(), nil + } + + var dataStoreChoices []string + for _, ds := range dataStores { + dataStoreChoices = append(dataStoreChoices, ds.Name()) + } + sort.Strings(dataStoreChoices) + + var selectedDataStore string + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Select{ + Message: "Default Datastore", + Options: dataStoreChoices, + Help: "The default datastore to be used for installation.", + }, + Validate: survey.Required, + }, + }, &selectedDataStore) + return selectedDataStore, nil +} + +func getNetwork(ctx context.Context, path string, finder *find.Finder, client *vim25.Client) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + + networks, err := finder.NetworkList(ctx, path) + if err != nil { + return "", errors.Wrap(err, "unable to list networks") + } + + // API returns an error when no results, but let's leave this in to be defensive. 
+ if len(networks) == 0 { + return "", errors.New("did not find any networks") + } + if len(networks) == 1 { + n := networks[0].(networkNamer) + logrus.Infof("Defaulting to only available network: %s", n.Name()) + return n.Name(), nil + } + + var networkChoices []string + for _, network := range networks { + if network.Reference().Type == distributedVirtualPortGroupType || network.Reference().Type == networkType { + n := network.(networkNamer) + networkChoices = append(networkChoices, n.Name()) + } + } + if len(networkChoices) == 0 { + return "", errors.New("could not find any networks of the type DistributedVirtualPortgroup or Network") + } + sort.Strings(networkChoices) + + var selectednetwork string + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Select{ + Message: "Network", + Options: networkChoices, + Help: "The network to be used for installation.", + }, + Validate: survey.Required, + }, + }, &selectednetwork) + + return selectednetwork, nil +} + +func getVIPs() (string, string) { + var apiVIP, ingressVIP string + + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Input{ + Message: "Virtual IP Address for API", + Help: "The VIP to be used for the OpenShift API.", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + return validate.IP((ans).(string)) + }), + }, + }, &apiVIP) + + survey.Ask([]*survey.Question{ + { + Prompt: &survey.Input{ + Message: "Virtual IP Address for Ingress", + Help: "The VIP to be used for ingress to the cluster.", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + return validate.IP((ans).(string)) + }), + }, + }, &ingressVIP) + return apiVIP, ingressVIP +} + +// formatPath is a helper function that appends "/..." to enable recursive +// find in a root object. 
For details, see the introduction at: +// https://godoc.org/github.com/vmware/govmomi/find +func formatPath(rootObject string) string { + return fmt.Sprintf("%s/...", rootObject) } diff --git a/pkg/asset/machines/aws/machines.go b/pkg/asset/machines/aws/machines.go index a75e1891f22..3e12865fbbb 100644 --- a/pkg/asset/machines/aws/machines.go +++ b/pkg/asset/machines/aws/machines.go @@ -79,7 +79,6 @@ func Machines(clusterID string, region string, subnets map[string]string, pool * } func provider(clusterID string, region string, subnet string, instanceType string, root *aws.EC2RootVolume, osImage string, zone, role, userDataSecret string, userTags map[string]string) (*awsprovider.AWSMachineProviderConfig, error) { - amiID := osImage tags, err := tagsFromUserTags(clusterID, userTags) if err != nil { return nil, errors.Wrap(err, "failed to create awsprovider.TagSpecifications from UserTags") @@ -102,7 +101,6 @@ func provider(clusterID string, region string, subnet string, instanceType strin }, }, }, - AMI: awsprovider.AWSResourceReference{ID: &amiID}, Tags: tags, IAMInstanceProfile: &awsprovider.AWSResourceReference{ID: pointer.StringPtr(fmt.Sprintf("%s-%s-profile", clusterID, role))}, UserDataSecret: &corev1.LocalObjectReference{Name: userDataSecret}, @@ -125,6 +123,15 @@ func provider(clusterID string, region string, subnet string, instanceType strin config.Subnet.ID = pointer.StringPtr(subnet) } + if osImage == "" { + config.AMI.Filters = []awsprovider.Filter{{ + Name: "tag:Name", + Values: []string{fmt.Sprintf("%s-ami-%s", clusterID, region)}, + }} + } else { + config.AMI.ID = pointer.StringPtr(osImage) + } + return config, nil } diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go index ad1a1406b4a..4f4645206c7 100644 --- a/pkg/asset/machines/master.go +++ b/pkg/asset/machines/master.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/ghodss/yaml" baremetalapi 
"github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis" @@ -17,7 +18,7 @@ import ( ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" vsphereapi "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider" - vsphereprovider "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1" + vsphereprovider "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -159,7 +160,14 @@ func (m *Master) Generate(dependencies asset.Parents) error { } mpool := defaultAWSMachinePoolPlatform() - mpool.AMIID = string(*rhcosImage) + + osImage := strings.SplitN(string(*rhcosImage), ",", 2) + osImageID := osImage[0] + if len(osImage) == 2 { + osImageID = "" // the AMI will be generated later on + } + mpool.AMIID = osImageID + mpool.Set(ic.Platform.AWS.DefaultMachinePlatform) mpool.Set(pool.Platform.AWS) if len(mpool.Zones) == 0 { diff --git a/pkg/asset/machines/openstack/machines.go b/pkg/asset/machines/openstack/machines.go index aa3d4d89105..f0bfd741e28 100644 --- a/pkg/asset/machines/openstack/machines.go +++ b/pkg/asset/machines/openstack/machines.go @@ -86,35 +86,54 @@ func Machines(clusterID string, config *types.InstallConfig, pool *types.Machine } func generateProvider(clusterID string, platform *openstack.Platform, mpool *openstack.MachinePool, osImage string, az string, role, userDataSecret string, trunk string) *openstackprovider.OpenstackProviderSpec { + var networks []openstackprovider.NetworkParam + if platform.MachinesSubnet != "" { + networks = []openstackprovider.NetworkParam{{ + Subnets: []openstackprovider.SubnetParam{{ + UUID: platform.MachinesSubnet, + }}}, + } + } else { + networks = []openstackprovider.NetworkParam{{ + Subnets: 
[]openstackprovider.SubnetParam{{ + Filter: openstackprovider.SubnetFilter{ + Name: fmt.Sprintf("%s-nodes", clusterID), + Tags: fmt.Sprintf("%s=%s", "openshiftClusterID", clusterID), + }}, + }}, + } + } + for _, networkID := range mpool.AdditionalNetworkIDs { + networks = append(networks, openstackprovider.NetworkParam{ + UUID: networkID, + NoAllowedAddressPairs: true, + }) + } + + securityGroups := []openstackprovider.SecurityGroupParam{ + { + Name: fmt.Sprintf("%s-%s", clusterID, role), + }, + } + for _, sg := range mpool.AdditionalSecurityGroupIDs { + securityGroups = append(securityGroups, openstackprovider.SecurityGroupParam{ + UUID: sg, + }) + } spec := openstackprovider.OpenstackProviderSpec{ TypeMeta: metav1.TypeMeta{ APIVersion: openstackprovider.SchemeGroupVersion.String(), Kind: "OpenstackProviderSpec", }, - Flavor: mpool.FlavorName, - CloudName: CloudName, - CloudsSecret: &corev1.SecretReference{Name: cloudsSecret, Namespace: cloudsSecretNamespace}, - UserDataSecret: &corev1.SecretReference{Name: userDataSecret}, - Networks: []openstackprovider.NetworkParam{ - { - Subnets: []openstackprovider.SubnetParam{ - { - Filter: openstackprovider.SubnetFilter{ - Name: fmt.Sprintf("%s-nodes", clusterID), - Tags: fmt.Sprintf("%s=%s", "openshiftClusterID", clusterID), - }, - }, - }, - }, - }, + Flavor: mpool.FlavorName, + CloudName: CloudName, + CloudsSecret: &corev1.SecretReference{Name: cloudsSecret, Namespace: cloudsSecretNamespace}, + UserDataSecret: &corev1.SecretReference{Name: userDataSecret}, + Networks: networks, AvailabilityZone: az, - SecurityGroups: []openstackprovider.SecurityGroupParam{ - { - Name: fmt.Sprintf("%s-%s", clusterID, role), - }, - }, - Trunk: trunkSupportBoolean(trunk), + SecurityGroups: securityGroups, + Trunk: trunkSupportBoolean(trunk), Tags: []string{ fmt.Sprintf("openshiftClusterID=%s", clusterID), }, diff --git a/pkg/asset/machines/vsphere/machines.go b/pkg/asset/machines/vsphere/machines.go index 09d157168b6..06c10768e25 100644 --- 
a/pkg/asset/machines/vsphere/machines.go +++ b/pkg/asset/machines/vsphere/machines.go @@ -5,7 +5,7 @@ import ( "fmt" machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - vsphereapis "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1" + vsphereapis "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index f224f37dfe4..8b9dac85c97 100644 --- a/pkg/asset/machines/worker.go +++ b/pkg/asset/machines/worker.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/ghodss/yaml" baremetalapi "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis" @@ -195,7 +196,14 @@ func (w *Worker) Generate(dependencies asset.Parents) error { } mpool := defaultAWSMachinePoolPlatform() - mpool.AMIID = string(*rhcosImage) + + osImage := strings.SplitN(string(*rhcosImage), ",", 2) + osImageID := osImage[0] + if len(osImage) == 2 { + osImageID = "" // the AMI will be generated later on + } + mpool.AMIID = osImageID + mpool.Set(ic.Platform.AWS.DefaultMachinePlatform) mpool.Set(pool.Platform.AWS) if len(mpool.Zones) == 0 { diff --git a/pkg/asset/manifests/dns.go b/pkg/asset/manifests/dns.go index 7d96b5c9672..c8407b9f847 100644 --- a/pkg/asset/manifests/dns.go +++ b/pkg/asset/manifests/dns.go @@ -81,7 +81,11 @@ func (d *DNS) Generate(dependencies asset.Parents) error { switch installConfig.Config.Platform.Name() { case awstypes.Name: if installConfig.Config.Publish == types.ExternalPublishingStrategy { - zone, err := icaws.GetPublicZone(installConfig.Config.BaseDomain) + sess, err := installConfig.AWS.Session(context.TODO()) + if err != nil { + return errors.Wrap(err, "failed to initialize session") + } + zone, err := icaws.GetPublicZone(sess, installConfig.Config.BaseDomain) if err != nil { return errors.Wrapf(err, "getting 
public zone for %q", installConfig.Config.BaseDomain) } diff --git a/pkg/asset/manifests/infrastructure.go b/pkg/asset/manifests/infrastructure.go index 887cc5ffdb2..cbcccd22a74 100644 --- a/pkg/asset/manifests/infrastructure.go +++ b/pkg/asset/manifests/infrastructure.go @@ -3,6 +3,7 @@ package manifests import ( "fmt" "path/filepath" + "sort" "github.com/ghodss/yaml" "github.com/pkg/errors" @@ -70,6 +71,9 @@ func (i *Infrastructure) Generate(dependencies asset.Parents) error { Name: "cluster", // not namespaced }, + Spec: configv1.InfrastructureSpec{ + PlatformSpec: configv1.PlatformSpec{}, + }, Status: configv1.InfrastructureStatus{ InfrastructureName: clusterID.InfraID, APIServerURL: getAPIServerURL(installConfig.Config), @@ -81,12 +85,28 @@ func (i *Infrastructure) Generate(dependencies asset.Parents) error { switch installConfig.Config.Platform.Name() { case aws.Name: - config.Status.PlatformStatus.Type = configv1.AWSPlatformType + config.Spec.PlatformSpec.Type = configv1.AWSPlatformType + config.Spec.PlatformSpec.AWS = &configv1.AWSPlatformSpec{} config.Status.PlatformStatus.AWS = &configv1.AWSPlatformStatus{ Region: installConfig.Config.Platform.AWS.Region, } + + for _, service := range installConfig.Config.Platform.AWS.ServiceEndpoints { + config.Spec.PlatformSpec.AWS.ServiceEndpoints = append(config.Spec.PlatformSpec.AWS.ServiceEndpoints, configv1.AWSServiceEndpoint{ + Name: service.Name, + URL: service.URL, + }) + config.Status.PlatformStatus.AWS.ServiceEndpoints = append(config.Status.PlatformStatus.AWS.ServiceEndpoints, configv1.AWSServiceEndpoint{ + Name: service.Name, + URL: service.URL, + }) + sort.Slice(config.Status.PlatformStatus.AWS.ServiceEndpoints, func(i, j int) bool { + return config.Status.PlatformStatus.AWS.ServiceEndpoints[i].Name < + config.Status.PlatformStatus.AWS.ServiceEndpoints[j].Name + }) + } case azure.Name: - config.Status.PlatformStatus.Type = configv1.AzurePlatformType + config.Spec.PlatformSpec.Type = 
configv1.AzurePlatformType rg := fmt.Sprintf("%s-rg", clusterID.InfraID) config.Status.PlatformStatus.Azure = &configv1.AzurePlatformStatus{ @@ -97,14 +117,14 @@ func (i *Infrastructure) Generate(dependencies asset.Parents) error { config.Status.PlatformStatus.Azure.NetworkResourceGroupName = nrg } case baremetal.Name: - config.Status.PlatformStatus.Type = configv1.BareMetalPlatformType + config.Spec.PlatformSpec.Type = configv1.BareMetalPlatformType config.Status.PlatformStatus.BareMetal = &configv1.BareMetalPlatformStatus{ APIServerInternalIP: installConfig.Config.Platform.BareMetal.APIVIP, NodeDNSIP: installConfig.Config.Platform.BareMetal.DNSVIP, IngressIP: installConfig.Config.Platform.BareMetal.IngressVIP, } case gcp.Name: - config.Status.PlatformStatus.Type = configv1.GCPPlatformType + config.Spec.PlatformSpec.Type = configv1.GCPPlatformType config.Status.PlatformStatus.GCP = &configv1.GCPPlatformStatus{ ProjectID: installConfig.Config.Platform.GCP.ProjectID, Region: installConfig.Config.Platform.GCP.Region, @@ -119,48 +139,40 @@ func (i *Infrastructure) Generate(dependencies asset.Parents) error { Data: content, }) case libvirt.Name: - config.Status.PlatformStatus.Type = configv1.LibvirtPlatformType + config.Spec.PlatformSpec.Type = configv1.LibvirtPlatformType case none.Name: - config.Status.PlatformStatus.Type = configv1.NonePlatformType + config.Spec.PlatformSpec.Type = configv1.NonePlatformType case openstack.Name: - config.Status.PlatformStatus.Type = configv1.OpenStackPlatformType - apiVIP, err := openstackdefaults.APIVIP(installConfig.Config.Networking) - if err != nil { - return err - } + config.Spec.PlatformSpec.Type = configv1.OpenStackPlatformType dnsVIP, err := openstackdefaults.DNSVIP(installConfig.Config.Networking) if err != nil { return err } - ingressVIP, err := openstackdefaults.IngressVIP(installConfig.Config.Networking) - if err != nil { - return err - } config.Status.PlatformStatus.OpenStack = &configv1.OpenStackPlatformStatus{ - 
APIServerInternalIP: apiVIP.String(), + APIServerInternalIP: installConfig.Config.OpenStack.APIVIP, NodeDNSIP: dnsVIP.String(), - IngressIP: ingressVIP.String(), + IngressIP: installConfig.Config.OpenStack.IngressVIP, } case vsphere.Name: - config.Status.PlatformStatus.Type = configv1.VSpherePlatformType + config.Spec.PlatformSpec.Type = configv1.VSpherePlatformType if installConfig.Config.VSphere.APIVIP != "" { config.Status.PlatformStatus.VSphere = &configv1.VSpherePlatformStatus{ APIServerInternalIP: installConfig.Config.VSphere.APIVIP, - NodeDNSIP: installConfig.Config.VSphere.DNSVIP, IngressIP: installConfig.Config.VSphere.IngressVIP, } } case ovirt.Name: - config.Status.PlatformStatus.Type = configv1.OvirtPlatformType + config.Spec.PlatformSpec.Type = configv1.OvirtPlatformType config.Status.PlatformStatus.Ovirt = &configv1.OvirtPlatformStatus{ APIServerInternalIP: installConfig.Config.Ovirt.APIVIP, NodeDNSIP: installConfig.Config.Ovirt.DNSVIP, IngressIP: installConfig.Config.Ovirt.IngressVIP, } default: - config.Status.PlatformStatus.Type = configv1.NonePlatformType + config.Spec.PlatformSpec.Type = configv1.NonePlatformType } - config.Status.Platform = config.Status.PlatformStatus.Type + config.Status.Platform = config.Spec.PlatformSpec.Type + config.Status.PlatformStatus.Type = config.Spec.PlatformSpec.Type if cloudproviderconfig.ConfigMap != nil { // set the configmap reference. 
diff --git a/pkg/asset/manifests/vsphere/cloudproviderconfig.go b/pkg/asset/manifests/vsphere/cloudproviderconfig.go index d6a6f5be52d..a3fad9d5237 100644 --- a/pkg/asset/manifests/vsphere/cloudproviderconfig.go +++ b/pkg/asset/manifests/vsphere/cloudproviderconfig.go @@ -27,7 +27,10 @@ func CloudProviderConfig(clusterName string, p *vspheretypes.Platform) (string, printIfNotEmpty(buf, "server", p.VCenter) printIfNotEmpty(buf, "datacenter", p.Datacenter) printIfNotEmpty(buf, "default-datastore", p.DefaultDatastore) - printIfNotEmpty(buf, "folder", clusterName) + printIfNotEmpty(buf, "folder", p.Folder) + if p.Folder == "" { + printIfNotEmpty(buf, "folder", clusterName) + } fmt.Fprintln(buf, "") fmt.Fprintf(buf, "[VirtualCenter %q]\n", p.VCenter) diff --git a/pkg/asset/mock/filefetcher_generated.go b/pkg/asset/mock/filefetcher_generated.go index 369c45c2a77..be8c23af358 100644 --- a/pkg/asset/mock/filefetcher_generated.go +++ b/pkg/asset/mock/filefetcher_generated.go @@ -10,30 +10,30 @@ import ( reflect "reflect" ) -// MockFileFetcher is a mock of FileFetcher interface +// MockFileFetcher is a mock of FileFetcher interface. type MockFileFetcher struct { ctrl *gomock.Controller recorder *MockFileFetcherMockRecorder } -// MockFileFetcherMockRecorder is the mock recorder for MockFileFetcher +// MockFileFetcherMockRecorder is the mock recorder for MockFileFetcher. type MockFileFetcherMockRecorder struct { mock *MockFileFetcher } -// NewMockFileFetcher creates a new mock instance +// NewMockFileFetcher creates a new mock instance. func NewMockFileFetcher(ctrl *gomock.Controller) *MockFileFetcher { mock := &MockFileFetcher{ctrl: ctrl} mock.recorder = &MockFileFetcherMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. 
func (m *MockFileFetcher) EXPECT() *MockFileFetcherMockRecorder { return m.recorder } -// FetchByName mocks base method +// FetchByName mocks base method. func (m *MockFileFetcher) FetchByName(arg0 string) (*asset.File, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchByName", arg0) @@ -42,13 +42,13 @@ func (m *MockFileFetcher) FetchByName(arg0 string) (*asset.File, error) { return ret0, ret1 } -// FetchByName indicates an expected call of FetchByName +// FetchByName indicates an expected call of FetchByName. func (mr *MockFileFetcherMockRecorder) FetchByName(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchByName", reflect.TypeOf((*MockFileFetcher)(nil).FetchByName), arg0) } -// FetchByPattern mocks base method +// FetchByPattern mocks base method. func (m *MockFileFetcher) FetchByPattern(pattern string) ([]*asset.File, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchByPattern", pattern) @@ -57,7 +57,7 @@ func (m *MockFileFetcher) FetchByPattern(pattern string) ([]*asset.File, error) return ret0, ret1 } -// FetchByPattern indicates an expected call of FetchByPattern +// FetchByPattern indicates an expected call of FetchByPattern. 
func (mr *MockFileFetcherMockRecorder) FetchByPattern(pattern interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchByPattern", reflect.TypeOf((*MockFileFetcher)(nil).FetchByPattern), pattern) diff --git a/pkg/asset/rhcos/image.go b/pkg/asset/rhcos/image.go index 926769da8cc..56dbbaa0fa4 100644 --- a/pkg/asset/rhcos/image.go +++ b/pkg/asset/rhcos/image.go @@ -3,7 +3,7 @@ package rhcos import ( "context" - "github.com/openshift/installer/pkg/types/ovirt" + "fmt" "os" "time" @@ -12,6 +12,7 @@ import ( "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" + configaws "github.com/openshift/installer/pkg/asset/installconfig/aws" "github.com/openshift/installer/pkg/rhcos" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/aws" @@ -21,6 +22,7 @@ import ( "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/none" "github.com/openshift/installer/pkg/types/openstack" + "github.com/openshift/installer/pkg/types/ovirt" "github.com/openshift/installer/pkg/types/vsphere" ) @@ -75,7 +77,14 @@ func osImage(config *types.InstallConfig) (string, error) { osimage = config.Platform.AWS.AMIID break } - osimage, err = rhcos.AMI(ctx, arch, config.Platform.AWS.Region) + region := config.Platform.AWS.Region + if !configaws.IsKnownRegion(config.Platform.AWS.Region) { + region = "us-east-1" + } + osimage, err = rhcos.AMI(ctx, arch, region) + if region != config.Platform.AWS.Region { + osimage = fmt.Sprintf("%s,%s", osimage, region) + } case gcp.Name: osimage, err = rhcos.GCP(ctx, arch) case libvirt.Name: diff --git a/pkg/asset/tls/bootstrapsshkeypair.go b/pkg/asset/tls/bootstrapsshkeypair.go new file mode 100644 index 00000000000..03aba020959 --- /dev/null +++ b/pkg/asset/tls/bootstrapsshkeypair.go @@ -0,0 +1,79 @@ +package tls + +import ( + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + + 
"github.com/openshift/installer/pkg/asset" +) + +// BootstrapSSHKeyPair generates a private, public key pair for SSH. +// These keys can use to used to configure the bootstrap-host so that the private key can be +// used to connect. +type BootstrapSSHKeyPair struct { + Priv []byte // private key + Pub []byte // public ssh key +} + +const bootstrapSSHKeyPairFilenameBase = "bootstrap-ssh" + +var _ asset.Asset = (*BootstrapSSHKeyPair)(nil) + +// Dependencies lists the assets required to generate the BootstrapSSHKeyPair. +func (a *BootstrapSSHKeyPair) Dependencies() []asset.Asset { + return []asset.Asset{} +} + +// Name defines a user freindly name for BootstrapSSHKeyPair. +func (a *BootstrapSSHKeyPair) Name() string { + return "Bootstrap SSH Key Pair" +} + +// Generate generates the key pair based on its dependencies. +func (a *BootstrapSSHKeyPair) Generate(dependencies asset.Parents) error { + kp := KeyPair{} + if err := kp.Generate(bootstrapSSHKeyPairFilenameBase); err != nil { + return errors.Wrap(err, "failed to generate key pair") + } + + publicRSAKey, err := PemToPublicKey(kp.Pub) + if err != nil { + return errors.Wrap(err, "failed to parse the public RSA key") + } + + publicSSHKey, err := ssh.NewPublicKey(publicRSAKey) + if err != nil { + return errors.Wrap(err, "failed to create public SSH key from public RSA key") + } + + a.Priv = kp.Private() + a.Pub = ssh.MarshalAuthorizedKey(publicSSHKey) + + return nil +} + +// Public returns the public SSH key. +func (a *BootstrapSSHKeyPair) Public() []byte { + return a.Pub +} + +// Private returns the private key. +func (a *BootstrapSSHKeyPair) Private() []byte { + return a.Priv +} + +// Files returns the files generated by the asset. 
+func (a *BootstrapSSHKeyPair) Files() []*asset.File { + return []*asset.File{{ + Filename: assetFilePath(bootstrapSSHKeyPairFilenameBase + ".key"), + Data: a.Priv, + }, { + Filename: assetFilePath(bootstrapSSHKeyPairFilenameBase + ".pub"), + Data: a.Pub, + }} +} + +// Load is a no-op because the service account keypair is not written to disk. +func (a *BootstrapSSHKeyPair) Load(asset.FileFetcher) (bool, error) { + return false, nil +} diff --git a/pkg/asset/tls/mcscertkey.go b/pkg/asset/tls/mcscertkey.go index c9895f6c3d1..2f01c150ed2 100644 --- a/pkg/asset/tls/mcscertkey.go +++ b/pkg/asset/tls/mcscertkey.go @@ -9,7 +9,6 @@ import ( "github.com/openshift/installer/pkg/asset/installconfig" baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" openstacktypes "github.com/openshift/installer/pkg/types/openstack" - openstackdefaults "github.com/openshift/installer/pkg/types/openstack/defaults" ovirttypes "github.com/openshift/installer/pkg/types/ovirt" vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) @@ -50,12 +49,8 @@ func (a *MCSCertKey) Generate(dependencies asset.Parents) error { cfg.IPAddresses = []net.IP{net.ParseIP(installConfig.Config.BareMetal.APIVIP)} cfg.DNSNames = []string{hostname, installConfig.Config.BareMetal.APIVIP} case openstacktypes.Name: - apiVIP, err := openstackdefaults.APIVIP(installConfig.Config.Networking) - if err != nil { - return err - } - cfg.IPAddresses = []net.IP{apiVIP} - cfg.DNSNames = []string{hostname, apiVIP.String()} + cfg.IPAddresses = []net.IP{net.ParseIP(installConfig.Config.OpenStack.APIVIP)} + cfg.DNSNames = []string{hostname, installConfig.Config.OpenStack.APIVIP} case ovirttypes.Name: cfg.IPAddresses = []net.IP{net.ParseIP(installConfig.Config.Ovirt.APIVIP)} cfg.DNSNames = []string{hostname, installConfig.Config.Ovirt.APIVIP} diff --git a/pkg/asset/tls/utils.go b/pkg/asset/tls/utils.go index c374fcda086..2d55b2b7f33 100644 --- a/pkg/asset/tls/utils.go +++ b/pkg/asset/tls/utils.go @@ -66,6 
+66,23 @@ func PemToPrivateKey(data []byte) (*rsa.PrivateKey, error) { return x509.ParsePKCS1PrivateKey(block.Bytes) } +// PemToPublicKey converts a data block to rsa.PublicKey. +func PemToPublicKey(data []byte) (*rsa.PublicKey, error) { + block, _ := pem.Decode(data) + if block == nil { + return nil, errors.Errorf("could not find a PEM block in the public key") + } + obji, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, err + } + publicKey, ok := obji.(*rsa.PublicKey) + if !ok { + return nil, errors.Errorf("invalid public key format, expected RSA") + } + return publicKey, nil +} + // PemToCertificate converts a data block to x509.Certificate. func PemToCertificate(data []byte) (*x509.Certificate, error) { block, _ := pem.Decode(data) diff --git a/pkg/destroy/aws/aws.go b/pkg/destroy/aws/aws.go index 41004a31006..db472a12de9 100644 --- a/pkg/destroy/aws/aws.go +++ b/pkg/destroy/aws/aws.go @@ -74,15 +74,18 @@ func New(logger logrus.FieldLogger, metadata *types.ClusterMetadata) (providers. 
for _, filter := range metadata.ClusterPlatformMetadata.AWS.Identifier { filters = append(filters, filter) } - - session, err := awssession.GetSession() + region := metadata.ClusterPlatformMetadata.AWS.Region + session, err := awssession.GetSessionWithOptions( + awssession.WithRegion(region), + awssession.WithServiceEndpoints(region, metadata.ClusterPlatformMetadata.AWS.ServiceEndpoints), + ) if err != nil { return nil, err } return &ClusterUninstaller{ Filters: filters, - Region: metadata.ClusterPlatformMetadata.AWS.Region, + Region: region, Logger: logger, ClusterID: metadata.InfraID, Session: session, @@ -103,16 +106,13 @@ func (o *ClusterUninstaller) Run() error { return err } - awsConfig := &aws.Config{Region: aws.String(o.Region)} awsSession := o.Session if awsSession == nil { // Relying on appropriate AWS ENV vars (eg AWS_PROFILE, AWS_ACCESS_KEY_ID, etc) - awsSession, err = session.NewSession(awsConfig) + awsSession, err = session.NewSession(aws.NewConfig().WithRegion(o.Region)) if err != nil { return err } - } else { - awsSession = awsSession.Copy(awsConfig) } awsSession.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "openshiftInstaller.OpenshiftInstallerUserAgentHandler", @@ -125,12 +125,21 @@ func (o *ClusterUninstaller) Run() error { tagClientNames := map[*resourcegroupstaggingapi.ResourceGroupsTaggingAPI]string{ tagClients[0]: o.Region, } - if o.Region != "us-east-1" { - tagClient := resourcegroupstaggingapi.New( - awsSession, aws.NewConfig().WithRegion("us-east-1"), - ) - tagClients = append(tagClients, tagClient) - tagClientNames[tagClient] = "us-east-1" + + switch o.Region { + case endpoints.CnNorth1RegionID, endpoints.CnNorthwest1RegionID: + if o.Region != endpoints.CnNorthwest1RegionID { + tagClient := resourcegroupstaggingapi.New(awsSession, aws.NewConfig().WithRegion(endpoints.CnNorthwest1RegionID)) + tagClients = append(tagClients, tagClient) + tagClientNames[tagClient] = endpoints.CnNorthwest1RegionID + } + + default: + if o.Region != 
endpoints.UsEast1RegionID { + tagClient := resourcegroupstaggingapi.New(awsSession, aws.NewConfig().WithRegion(endpoints.UsEast1RegionID)) + tagClients = append(tagClients, tagClient) + tagClientNames[tagClient] = endpoints.UsEast1RegionID + } } iamClient := iam.New(awsSession) @@ -1832,6 +1841,45 @@ func deleteS3(session *session.Session, arn arn.ARN, logger logrus.FieldLogger) } logger.Debug("Emptied") + var lastError error + err = client.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), &s3.ListObjectVersionsInput{ + Bucket: aws.String(arn.Resource), + MaxKeys: aws.Int64(1000), + }, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + var deleteObjects []*s3.ObjectIdentifier + for _, deleteMarker := range page.DeleteMarkers { + deleteObjects = append(deleteObjects, &s3.ObjectIdentifier{ + Key: aws.String(*deleteMarker.Key), + VersionId: aws.String(*deleteMarker.VersionId), + }) + } + for _, version := range page.Versions { + deleteObjects = append(deleteObjects, &s3.ObjectIdentifier{ + Key: aws.String(*version.Key), + VersionId: aws.String(*version.VersionId), + }) + } + if len(deleteObjects) > 0 { + _, err := client.DeleteObjects(&s3.DeleteObjectsInput{ + Bucket: aws.String(arn.Resource), + Delete: &s3.Delete{ + Objects: deleteObjects, + }, + }) + if err != nil { + lastError = errors.Wrapf(err, "delete object failed %v", err) + } + } + return !lastPage + }) + if lastError != nil { + return lastError + } + if err != nil && !isBucketNotFound(err) { + return err + } + logger.Debug("Versions Deleted") + _, err = client.DeleteBucket(&s3.DeleteBucketInput{ Bucket: aws.String(arn.Resource), }) diff --git a/pkg/destroy/azure/azure.go b/pkg/destroy/azure/azure.go index cc6590cf587..7c385072aeb 100644 --- a/pkg/destroy/azure/azure.go +++ b/pkg/destroy/azure/azure.go @@ -260,6 +260,10 @@ func deleteResourceGroup(ctx context.Context, client resources.GroupsClient, log delFuture, err := client.Delete(ctx, name) if err != nil { + if 
wasNotFound(delFuture.Response()) { + logger.Debug("already deleted") + return nil + } return err } diff --git a/pkg/destroy/openstack/openstack.go b/pkg/destroy/openstack/openstack.go index edf558b584f..0d91dd0ff23 100644 --- a/pkg/destroy/openstack/openstack.go +++ b/pkg/destroy/openstack/openstack.go @@ -14,6 +14,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/apiversions" "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" sg "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" @@ -25,6 +26,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" "github.com/gophercloud/utils/openstack/clientconfig" + "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" ) @@ -59,15 +61,18 @@ type ClusterUninstaller struct { Cloud string // Filter contains the openshiftClusterID to filter tags Filter Filter - Logger logrus.FieldLogger + // InfraID contains unique cluster identifier + InfraID string + Logger logrus.FieldLogger } // New returns an OpenStack destroyer from ClusterMetadata. 
func New(logger logrus.FieldLogger, metadata *types.ClusterMetadata) (providers.Destroyer, error) { return &ClusterUninstaller{ - Cloud: metadata.ClusterPlatformMetadata.OpenStack.Cloud, - Filter: metadata.ClusterPlatformMetadata.OpenStack.Identifier, - Logger: logger, + Cloud: metadata.ClusterPlatformMetadata.OpenStack.Cloud, + Filter: metadata.ClusterPlatformMetadata.OpenStack.Identifier, + InfraID: metadata.InfraID, + Logger: logger, }, nil } @@ -110,6 +115,12 @@ func (o *ClusterUninstaller) Run() error { } } + // we need to untag the custom network if it was provided by the user + err := untagRunner(opts, o.InfraID, o.Logger) + if err != nil { + return err + } + return nil } @@ -998,3 +1009,68 @@ func deleteImages(opts *clientconfig.ClientOpts, filter Filter, logger logrus.Fi } return true, nil } + +func untagRunner(opts *clientconfig.ClientOpts, infraID string, logger logrus.FieldLogger) error { + backoffSettings := wait.Backoff{ + Duration: time.Second * 10, + Steps: 25, + } + + err := wait.ExponentialBackoff(backoffSettings, func() (bool, error) { + return untagPrimaryNetwork(opts, infraID, logger) + }) + if err != nil { + if err == wait.ErrWaitTimeout { + return err + } + return errors.Errorf("Unrecoverable error: %v", err) + } + + return nil +} + +// untagNetwork removes the tag from the primary cluster network based on unfra id +func untagPrimaryNetwork(opts *clientconfig.ClientOpts, infraID string, logger logrus.FieldLogger) (bool, error) { + networkTag := infraID + "-primaryClusterNetwork" + + logger.Debugf("Removing tag %v from openstack networks", networkTag) + defer logger.Debug("Exiting untagging openstack networks") + + conn, err := clientconfig.NewServiceClient("network", opts) + if err != nil { + logger.Debug(err) + return false, nil + } + + listOpts := networks.ListOpts{ + Tags: networkTag, + } + + allPages, err := networks.List(conn, listOpts).AllPages() + if err != nil { + logger.Debug(err) + return false, nil + } + + allNetworks, err := 
networks.ExtractNetworks(allPages) + if err != nil { + logger.Debug(err) + return false, nil + } + + if len(allNetworks) > 1 { + return false, errors.Errorf("More than one network with tag %v", networkTag) + } + + if len(allNetworks) == 0 { + // The network has already been deleted. + return true, nil + } + + err = attributestags.Delete(conn, "networks", allNetworks[0].ID, networkTag).ExtractErr() + if err != nil { + return false, nil + } + + return true, nil +} diff --git a/pkg/destroy/ovirt/destroyer.go b/pkg/destroy/ovirt/destroyer.go index a2f201c22d2..693e7fd4c7d 100644 --- a/pkg/destroy/ovirt/destroyer.go +++ b/pkg/destroy/ovirt/destroyer.go @@ -33,27 +33,31 @@ func (uninstaller *ClusterUninstaller) Run() error { } defer con.Close() - err = uninstaller.removeVms(con) - uninstaller.Logger.Errorf("Removing VMs - error: %s", err) - err = uninstaller.removeTag(con) - uninstaller.Logger.Errorf("Removing Tag - error: %s", err) - err = uninstaller.removeTemplate(con) - uninstaller.Logger.Errorf("Removing Template - error: %s", err) + if err := uninstaller.removeVMs(con); err != nil { + uninstaller.Logger.Errorf("Failed to remove VMs: %s", err) + } + if err := uninstaller.removeTag(con); err != nil { + uninstaller.Logger.Errorf("Failed to remove tag: %s", err) + } + if err := uninstaller.removeTemplate(con); err != nil { + uninstaller.Logger.Errorf("Failed to remove template: %s", err) + } + return nil } -func (uninstaller *ClusterUninstaller) removeVms(con *ovirtsdk.Connection) error { +func (uninstaller *ClusterUninstaller) removeVMs(con *ovirtsdk.Connection) error { // - find all vms by tag name=infraID vmsService := con.SystemService().VmsService() searchTerm := fmt.Sprintf("tag=%s", uninstaller.Metadata.InfraID) - uninstaller.Logger.Infof("searching VMs by %s", searchTerm) + uninstaller.Logger.Debugf("Searching VMs by %s", searchTerm) vmsResponse, err := vmsService.List().Search(searchTerm).Send() if err != nil { return err } // - stop + delete VMS vms := 
vmsResponse.MustVms().Slice() - uninstaller.Logger.Infof("Found %s VMs", len(vms)) + uninstaller.Logger.Debugf("Found %d VMs", len(vms)) wg := sync.WaitGroup{} wg.Add(len(vms)) for _, vm := range vms { @@ -77,10 +81,9 @@ func (uninstaller *ClusterUninstaller) removeTag(con *ovirtsdk.Connection) error if tagsServiceListResponse != nil { for _, t := range tagsServiceListResponse.MustTags().Slice() { if t.MustName() == uninstaller.Metadata.InfraID { + uninstaller.Logger.Infof("Removing tag %s", t.MustName()) _, err := tagsService.TagService(t.MustId()).Remove().Send() - uninstaller.Logger.Infof("Removing tag %s : %s", t.MustName(), "errors: %s", err) if err != nil { - uninstaller.Logger.Debugf("Failed removing tag %s : %s", t.MustName(), err) return err } } @@ -93,23 +96,27 @@ func (uninstaller *ClusterUninstaller) stopVM(vmsService *ovirtsdk.VmsService, v vmService := vmsService.VmService(vm.MustId()) // this is a teardown, stopping instead of shutting down. _, err := vmService.Stop().Send() - uninstaller.Logger.Infof("Stopping VM %s : %s", vm.MustName(), "errors: %s", err) - if err != nil { - uninstaller.Logger.Debugf("Failed stopping VM %s : %s", vm.MustName(), err) + if err == nil { + uninstaller.Logger.Infof("Stopping VM %s", vm.MustName()) + } else { + uninstaller.Logger.Errorf("Failed to stop VM %s: %s", vm.MustName(), err) } waitForDownDuration := time.Minute * 10 err = vmService.Connection().WaitForVM(vm.MustId(), ovirtsdk.VMSTATUS_DOWN, waitForDownDuration) - if err != nil { - uninstaller.Logger.Warnf("Waiting %d for VM %s to power-off", waitForDownDuration, vm.MustName()) + if err == nil { + uninstaller.Logger.Infof("VM %s powered off", vm.MustName()) + } else { + uninstaller.Logger.Warnf("Waited %d for VM %s to power off: %s", waitForDownDuration, vm.MustName(), err) } } func (uninstaller *ClusterUninstaller) removeVM(vmsService *ovirtsdk.VmsService, vm *ovirtsdk.Vm) { vmService := vmsService.VmService(vm.MustId()) _, err := vmService.Remove().Send() - 
uninstaller.Logger.Infof("Removing VM %s : %s", vm.MustName(), "errors: %s", err) - if err != nil { - uninstaller.Logger.Debugf("Failed removing VM %s : %s", vm.MustName(), err) + if err == nil { + uninstaller.Logger.Infof("Removing VM %s", vm.MustName()) + } else { + uninstaller.Logger.Errorf("Failed to remove VM %s: %s", vm.MustName(), err) } } @@ -118,8 +125,7 @@ func (uninstaller *ClusterUninstaller) removeTemplate(con *ovirtsdk.Connection) search, err := con.SystemService().TemplatesService(). List().Search(fmt.Sprintf("name=%s", uninstaller.Metadata.InfraID)).Send() if err != nil { - uninstaller.Logger.Errorf("Couldn't find a template with name %s", uninstaller.Metadata.InfraID) - return nil + return fmt.Errorf("couldn't find a template with name %s", uninstaller.Metadata.InfraID) } if result, ok := search.Templates(); ok { // the results can potentially return a list of template @@ -129,7 +135,6 @@ func (uninstaller *ClusterUninstaller) removeTemplate(con *ovirtsdk.Connection) service := con.SystemService().TemplatesService().TemplateService(tmp.MustId()) _, err := service.Remove().Send() if err != nil { - uninstaller.Logger.Errorf("Failed to remove Template %s %s", tmp.MustName(), tmp.MustId()) return err } } diff --git a/pkg/gather/ssh/agent.go b/pkg/gather/ssh/agent.go index 5debd0494ad..e27146a4118 100644 --- a/pkg/gather/ssh/agent.go +++ b/pkg/gather/ssh/agent.go @@ -52,8 +52,22 @@ func newAgent(keyPaths []string) (agent.Agent, error) { } func loadKeys(paths []string) (map[string]interface{}, error) { + keys := map[string]interface{}{} if len(paths) > 0 { - return LoadPrivateSSHKeys(paths) + pkeys, err := LoadPrivateSSHKeys(paths) + if err != nil { + return nil, err + } + for k, v := range pkeys { + keys[k] = v + } + } + dkeys, err := defaultPrivateSSHKeys() + if err != nil && len(paths) == 0 { + return nil, err + } + for k, v := range dkeys { + keys[k] = v } - return defaultPrivateSSHKeys() + return keys, nil } diff --git a/pkg/metrics/timer/timer.go 
b/pkg/metrics/timer/timer.go new file mode 100644 index 00000000000..54f4bb8bd45 --- /dev/null +++ b/pkg/metrics/timer/timer.go @@ -0,0 +1,100 @@ +package timer + +import ( + "fmt" + "time" + + "github.com/sirupsen/logrus" +) + +// Timer is the struct that keeps track of each of the sections. +type Timer struct { + listOfStages []string + stageTimes map[string]time.Duration + startTimes map[string]time.Time +} + +const ( + + // TotalTimeElapsed is a constant string value to denote total time elapsed. + TotalTimeElapsed = "Total" +) + +var timer = NewTimer() + +// StartTimer initiailzes the timer object with the current timestamp information. +func StartTimer(key string) { + timer.StartTimer(key) +} + +// StopTimer records the duration for the current stage sent as the key parameter and stores the information. +func StopTimer(key string) { + timer.StopTimer(key) +} + +// LogSummary prints the summary of all the times collected so far into the INFO section. +func LogSummary() { + timer.LogSummary(logrus.StandardLogger()) +} + +// NewTimer returns a new timer that can be used to track sections and +func NewTimer() Timer { + return Timer{ + listOfStages: []string{}, + stageTimes: make(map[string]time.Duration), + startTimes: make(map[string]time.Time), + } +} + +// StartTimer initializes the timer object with the current timestamp information. +func (t *Timer) StartTimer(key string) { + t.listOfStages = append(t.listOfStages, key) + t.startTimes[key] = time.Now().Round(time.Second) +} + +// StopTimer records the duration for the current stage sent as the key parameter and stores the information. +func (t *Timer) StopTimer(key string) time.Duration { + if item, found := t.startTimes[key]; found { + duration := time.Since(item).Round(time.Second) + t.stageTimes[key] = duration + } + return time.Since(time.Now()) +} + +// LogSummary prints the summary of all the times collected so far into the INFO section. 
+// The format of printing will be the following: +// If there are no stages except the total time stage, then it only prints the following +// Time elapsed: ms +// If there are multiple stages, it prints the following: +// Time elapsed for each section +// Stage1: ms +// Stage2: ms +// . +// . +// . +// StageN: ms +// Time elapsed: ms +// All durations printed are rounded up to the next second value and printed in the format mentioned above. +func (t *Timer) LogSummary(logger *logrus.Logger) { + maxLen := 0 + count := 0 + for _, item := range t.listOfStages { + if len(item) > maxLen && item != TotalTimeElapsed { + maxLen = len(item) + } + if t.stageTimes[item] > 0 { + count++ + } + } + + if maxLen != 0 && count > 0 { + logger.Debugf("Time elapsed per stage:") + } + + for _, item := range t.listOfStages { + if item != TotalTimeElapsed && t.stageTimes[item] > 0 { + logger.Debugf(fmt.Sprintf("%*s: %s", maxLen, item, t.stageTimes[item])) + } + } + logger.Infof("Time elapsed: %s", t.stageTimes[TotalTimeElapsed]) +} diff --git a/pkg/metrics/timer/timer_test.go b/pkg/metrics/timer/timer_test.go new file mode 100644 index 00000000000..34c561a4787 --- /dev/null +++ b/pkg/metrics/timer/timer_test.go @@ -0,0 +1,131 @@ +package timer + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/sirupsen/logrus" +) + +func convertToFormat(buf bytes.Buffer) string { + output := buf.String() + + outputText := "" + + for _, item := range strings.Split(output, "\n") { + var dat map[string]interface{} + + if err := json.Unmarshal([]byte(item), &dat); err != nil { + continue + } + outputText += dat["msg"].(string) + "\n" + } + + return outputText +} + +func TestBasicLogSummaryMultipleStages(t *testing.T) { + timer := NewTimer() + + timer.StartTimer(TotalTimeElapsed) + timer.StartTimer("testStage1") + timer.StartTimer("testStage2") + timer.StartTimer("testStage3") + timer.StartTimer("testStage4") + + time.Sleep(5 * time.Second) + + 
timer.StopTimer("testStage1") + timer.StopTimer("testStage2") + timer.StopTimer("testStage3") + timer.StopTimer("testStage4") + timer.StopTimer(TotalTimeElapsed) + + timeElapsed := fmt.Sprintf("Time elapsed per stage:\n") + time1 := fmt.Sprintf("testStage1: %s\n", timer.stageTimes["testStage1"]) + time2 := fmt.Sprintf("testStage2: %s\n", timer.stageTimes["testStage2"]) + time3 := fmt.Sprintf("testStage3: %s\n", timer.stageTimes["testStage3"]) + time4 := fmt.Sprintf("testStage4: %s\n", timer.stageTimes["testStage4"]) + timeStageElapsed := fmt.Sprintf("Time elapsed: %s\n", timer.stageTimes[TotalTimeElapsed]) + + text := timeElapsed + time1 + time2 + time3 + time4 + timeStageElapsed + + textOutput := bytes.Buffer{} + + logger := logrus.New() + logger.Out = &textOutput + logger.Level = logrus.DebugLevel + logger.Formatter = &logrus.JSONFormatter{} + + timer.LogSummary(logger) + + outputText := convertToFormat(textOutput) + + if text != outputText { + t.Fatalf("expected message summary printed to be %s, but got %s", text, outputText) + } +} + +func TestTotalOnlyLogSummary(t *testing.T) { + timer := NewTimer() + + timer.StartTimer(TotalTimeElapsed) + time.Sleep(5 * time.Second) + timer.StopTimer(TotalTimeElapsed) + + timeStageElapsed := fmt.Sprintf("Time elapsed: %s\n", timer.stageTimes[TotalTimeElapsed]) + + textOutput := bytes.Buffer{} + + logger := logrus.New() + logger.Out = &textOutput + logger.Level = logrus.DebugLevel + logger.Formatter = &logrus.JSONFormatter{} + + timer.LogSummary(logger) + + outputText := convertToFormat(textOutput) + + if timeStageElapsed != outputText { + t.Fatalf("expected message summary printed to be %s, but got %s", timeStageElapsed, outputText) + } +} + +func TestStartAndStopTimer(t *testing.T) { + timerTotal := NewTimer() + + timerTotal.StartTimer(TotalTimeElapsed) + time.Sleep(5 * time.Second) + + duration := time.Since(timerTotal.startTimes[TotalTimeElapsed]).Round(time.Second) + t.Logf("%s", duration) + if duration < 5*time.Second { 
+ t.Fatalf("Slept for 5 seconds, expected start time to be 5 seconds old, got %s", duration) + } else if duration > 10*time.Second { + t.Fatalf("Slept for 5 seconds, expected start time to be close to 5 seconds old, got %s", duration) + } + timerTotal.StopTimer(TotalTimeElapsed) + + if timerTotal.stageTimes[TotalTimeElapsed] < 5*time.Second || timerTotal.stageTimes[TotalTimeElapsed] > 10*time.Second { + t.Fatalf("Slept for 5 seconds, expected duration to be close to 5 seconds old, got %s", timerTotal.stageTimes[TotalTimeElapsed]) + } +} + +func TestNewTimer(t *testing.T) { + timer := NewTimer() + if len(timer.listOfStages) != 0 { + t.Fatalf("Expected empty list of stages property in the new timer created, got %d", len(timer.listOfStages)) + } + + if len(timer.startTimes) != 0 { + t.Fatalf("Expected empty list of startTimes property in the new timer created, got %d", len(timer.startTimes)) + } + + if len(timer.stageTimes) != 0 { + t.Fatalf("Expected empty list of startTimes property in the new timer created, got %d", len(timer.stageTimes)) + } +} diff --git a/pkg/migrate/azure/privatedns.go b/pkg/migrate/azure/privatedns.go new file mode 100644 index 00000000000..475c6936bf5 --- /dev/null +++ b/pkg/migrate/azure/privatedns.go @@ -0,0 +1,529 @@ +package azure + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest/to" + + "github.com/sirupsen/logrus" + + aznetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network" + azdns "github.com/Azure/azure-sdk-for-go/services/preview/dns/mgmt/2018-03-01-preview/dns" + azprivatedns "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" + azconfig "github.com/openshift/installer/pkg/asset/installconfig/azure" +) + +type legacyDNSZone struct { + zone *azdns.Zone + recordsets []azdns.RecordSet +} + +type legacyDNSClient struct { + resourceGroup string + zonesClient azdns.ZonesClient + recordsetsClient azdns.RecordSetsClient +} + 
+func newLegacyDNSClient(session *azconfig.Session, resourceGroup string) *legacyDNSClient { + zonesClient := azdns.NewZonesClient(session.Credentials.SubscriptionID) + zonesClient.Authorizer = session.Authorizer + + recordsetsClient := azdns.NewRecordSetsClient(session.Credentials.SubscriptionID) + recordsetsClient.Authorizer = session.Authorizer + + return &legacyDNSClient{resourceGroup, zonesClient, recordsetsClient} +} + +// Takes a subscription ID and parses the resource group out of it. +// A subscription ID has the format "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xxxx-xxxxx-rg/providers/...". +// Splitting the string on '/' gives us the following slice: +// parts[0] = '' +// parts[1] = 'subscriptions' +// parts[2] = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' +// parts[3] = 'resourceGroups' +// parts[4] = 'xxxx-xxxxx-rg' +// parts[..] = ... the rest +// So if the length of the split is at least 5 and index 3 is "resourcegroups", +// we can safely assume the resource group is in the correct place. 
+func idToResourceGroup(id string) string { + rg := "" + parts := strings.Split(id, "/") + if len(parts) >= 5 && strings.ToLower(parts[3]) == "resourcegroups" { + rg = parts[4] + } + return rg +} + +// Gets a single legacy zone and its recordsets +func (client *legacyDNSClient) getZone(legacyZone string) (*legacyDNSZone, error) { + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + + zone, err := client.zonesClient.Get(ctx, client.resourceGroup, legacyZone) + zone.Response.Response = nil + if err != nil { + return nil, err + } + + if zone.ZoneProperties.ZoneType != azdns.Private { + return nil, errors.New("not a private zone") + } + + legacyDNSZone := legacyDNSZone{} + legacyDNSZone.zone = &zone + + for recordsetsPage, err := client.recordsetsClient.ListAllByDNSZone(ctx, client.resourceGroup, to.String(zone.Name), to.Int32Ptr(100), ""); recordsetsPage.NotDone(); err = recordsetsPage.NextWithContext(ctx) { + if err != nil { + return nil, err + } + + for _, rs := range recordsetsPage.Values() { + legacyDNSZone.recordsets = append(legacyDNSZone.recordsets, rs) + } + } + + return &legacyDNSZone, nil +} + +// Gets all legacy zones +func (client *legacyDNSClient) getZones() ([]azdns.Zone, error) { + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + + var legacyDNSZones []azdns.Zone + for zonesPage, err := client.zonesClient.List(ctx, to.Int32Ptr(100)); zonesPage.NotDone(); err = zonesPage.NextWithContext(ctx) { + if err != nil { + return nil, err + } + + for _, zone := range zonesPage.Values() { + zone.Response.Response = nil + if zone.ZoneProperties.ZoneType != azdns.Private { + continue + } + + legacyDNSZones = append(legacyDNSZones, zone) + } + } + + return legacyDNSZones, nil +} + +type privateDNSClient struct { + resourceGroup string + vnetResourceGroup string + virtualNetwork string + zonesClient azprivatedns.PrivateZonesClient + recordsetsClient azprivatedns.RecordSetsClient + 
virtualNetworkLinksClient azprivatedns.VirtualNetworkLinksClient + virtualNetworksClient aznetwork.VirtualNetworksClient +} + +func newPrivateDNSClient(session *azconfig.Session, resourceGroup string, virtualNetwork string, vnetResourceGroup string) *privateDNSClient { + zonesClient := azprivatedns.NewPrivateZonesClient(session.Credentials.SubscriptionID) + zonesClient.Authorizer = session.Authorizer + + recordsetsClient := azprivatedns.NewRecordSetsClient(session.Credentials.SubscriptionID) + recordsetsClient.Authorizer = session.Authorizer + + virtualNetworkLinksClient := azprivatedns.NewVirtualNetworkLinksClient(session.Credentials.SubscriptionID) + virtualNetworkLinksClient.Authorizer = session.Authorizer + + virtualNetworksClient := aznetwork.NewVirtualNetworksClient(session.Credentials.SubscriptionID) + virtualNetworksClient.Authorizer = session.Authorizer + + return &privateDNSClient{resourceGroup, vnetResourceGroup, virtualNetwork, zonesClient, recordsetsClient, virtualNetworkLinksClient, virtualNetworksClient} +} + +// convert a legacy SOA record to a private SOA record +func legacySoaRecordToPrivate(legacySoaRecord *azdns.SoaRecord) *azprivatedns.SoaRecord { + var soaRecord *azprivatedns.SoaRecord = nil + + if legacySoaRecord != nil { + soaRecord = &azprivatedns.SoaRecord{ + Host: legacySoaRecord.Host, + Email: legacySoaRecord.Email, + SerialNumber: legacySoaRecord.SerialNumber, + RefreshTime: legacySoaRecord.RefreshTime, + RetryTime: legacySoaRecord.RetryTime, + ExpireTime: legacySoaRecord.ExpireTime, + MinimumTTL: legacySoaRecord.MinimumTTL, + } + } + + return soaRecord +} + +// convert a legacy MX record to a private MX record +func legacyMxRecordToPrivate(legacyMxRecord *azdns.MxRecord) *azprivatedns.MxRecord { + var mxRecord *azprivatedns.MxRecord = nil + + if legacyMxRecord != nil { + mxRecord = &azprivatedns.MxRecord{ + Preference: legacyMxRecord.Preference, + Exchange: legacyMxRecord.Exchange, + } + } + + return mxRecord +} + +// convert a legacy 
A record to a private A record +func legacyARecordToPrivate(legacyARecord *azdns.ARecord) *azprivatedns.ARecord { + var aRecord *azprivatedns.ARecord = nil + + if legacyARecord != nil { + aRecord = &azprivatedns.ARecord{ + Ipv4Address: legacyARecord.Ipv4Address, + } + } + return aRecord +} + +// convert a legacy AAAA record to a private AAAA record +func legacyAaaaRecordToPrivate(legacyAaaaRecord *azdns.AaaaRecord) *azprivatedns.AaaaRecord { + var aaaaRecord *azprivatedns.AaaaRecord = nil + + if legacyAaaaRecord != nil { + aaaaRecord = &azprivatedns.AaaaRecord{ + Ipv6Address: legacyAaaaRecord.Ipv6Address, + } + } + + return aaaaRecord +} + +// convert a legacy CNAME record to a private CNAME record +func legacyCnameRecordToPrivate(legacyCnameRecord *azdns.CnameRecord) *azprivatedns.CnameRecord { + var cnameRecord *azprivatedns.CnameRecord = nil + + if legacyCnameRecord != nil { + cnameRecord = &azprivatedns.CnameRecord{ + Cname: legacyCnameRecord.Cname, + } + } + + return cnameRecord +} + +// convert a legacy PTR record to a private PTR record +func legacyPtrRecordToPrivate(legacyPtrRecord *azdns.PtrRecord) *azprivatedns.PtrRecord { + var ptrRecord *azprivatedns.PtrRecord = nil + + if legacyPtrRecord != nil { + ptrRecord = &azprivatedns.PtrRecord{ + Ptrdname: legacyPtrRecord.Ptrdname, + } + } + + return ptrRecord +} + +// convert a legacy SRV record to a private SRV record +func legacySrvRecordToPrivate(legacySrvRecord *azdns.SrvRecord) *azprivatedns.SrvRecord { + var srvRecord *azprivatedns.SrvRecord = nil + + if legacySrvRecord != nil { + srvRecord = &azprivatedns.SrvRecord{ + Priority: legacySrvRecord.Priority, + Weight: legacySrvRecord.Weight, + Port: legacySrvRecord.Port, + Target: legacySrvRecord.Target, + } + } + + return srvRecord +} + +// convert a legacy TXT record to a private TXT record +func legacyTxtRecordToPrivate(legacyTxtRecord *azdns.TxtRecord) *azprivatedns.TxtRecord { + var txtRecord *azprivatedns.TxtRecord = nil + + if legacyTxtRecord != nil { 
+ txtRecord = &azprivatedns.TxtRecord{ + Value: legacyTxtRecord.Value, + } + } + + return txtRecord +} + +// convert an array of legacy MX records to an array of private MX records +func legacyMxRecordsToPrivate(legacyMxRecords *[]azdns.MxRecord) *[]azprivatedns.MxRecord { + var mxRecords []azprivatedns.MxRecord = nil + + if legacyMxRecords != nil { + mxRecords = make([]azprivatedns.MxRecord, len(*legacyMxRecords)) + for _, legacyMxRecord := range *legacyMxRecords { + mxRecord := legacyMxRecordToPrivate(&legacyMxRecord) + if mxRecord != nil { + mxRecords = append(mxRecords, *mxRecord) + } + } + } + + return &mxRecords +} + +// convert an array of legacy A records to an array of private A records +func legacyARecordsToPrivate(legacyARecords *[]azdns.ARecord) *[]azprivatedns.ARecord { + var aRecords []azprivatedns.ARecord = nil + + if legacyARecords != nil { + for _, legacyARecord := range *legacyARecords { + aRecord := legacyARecordToPrivate(&legacyARecord) + if aRecord != nil { + aRecords = append(aRecords, *aRecord) + } + + } + } + + return &aRecords +} + +// convert an array of legacy AAAA records to an array of private AAAA records +func legacyAaaaRecordsToPrivate(legacyAaaaRecords *[]azdns.AaaaRecord) *[]azprivatedns.AaaaRecord { + var aaaaRecords []azprivatedns.AaaaRecord = nil + + if legacyAaaaRecords != nil { + for _, legacyAaaaRecord := range *legacyAaaaRecords { + aaaaRecord := legacyAaaaRecordToPrivate(&legacyAaaaRecord) + if aaaaRecord != nil { + aaaaRecords = append(aaaaRecords, *aaaaRecord) + } + } + } + + return &aaaaRecords +} + +// convert an array of legacy PTR records to an array of private PTR records +func legacyPtrRecordsToPrivate(legacyPtrRecords *[]azdns.PtrRecord) *[]azprivatedns.PtrRecord { + var ptrRecords []azprivatedns.PtrRecord = nil + + if legacyPtrRecords != nil { + for _, legacyPtrRecord := range *legacyPtrRecords { + ptrRecord := legacyPtrRecordToPrivate(&legacyPtrRecord) + if ptrRecord != nil { + ptrRecords = append(ptrRecords, 
*ptrRecord) + } + } + } + + return &ptrRecords +} + +// convert an array of legacy SRV records to an array of private SRV records +func legacySrvRecordsToPrivate(legacySrvRecords *[]azdns.SrvRecord) *[]azprivatedns.SrvRecord { + var srvRecords []azprivatedns.SrvRecord = nil + + if legacySrvRecords != nil { + for _, legacySrvRecord := range *legacySrvRecords { + srvRecord := legacySrvRecordToPrivate(&legacySrvRecord) + if srvRecord != nil { + srvRecords = append(srvRecords, *srvRecord) + } + } + } + + return &srvRecords +} + +// convert an array of legacy TXT records to an array of private TXT records +func legacyTxtRecordsToPrivate(legacyTxtRecords *[]azdns.TxtRecord) *[]azprivatedns.TxtRecord { + var txtRecords []azprivatedns.TxtRecord = nil + + if legacyTxtRecords != nil { + for _, legacyTxtRecord := range *legacyTxtRecords { + txtRecord := legacyTxtRecordToPrivate(&legacyTxtRecord) + if txtRecord != nil { + txtRecords = append(txtRecords, *txtRecord) + } + } + } + + return &txtRecords +} + +// Transforms a legacy zone to a private zone +func (client *privateDNSClient) migrateLegacyZone(legacyDNSZone *legacyDNSZone, link bool) error { + legacyZone := legacyDNSZone.zone + + // Setup the private zone to create + privateZone := azprivatedns.PrivateZone{} + privateZone.Tags = legacyZone.Tags + privateZone.Location = legacyZone.Location + privateZone.Name = legacyZone.Name + privateZone.PrivateZoneProperties = &azprivatedns.PrivateZoneProperties{ + MaxNumberOfRecordSets: legacyZone.ZoneProperties.MaxNumberOfRecordSets, + } + + legacyRecordSets := legacyDNSZone.recordsets + + // Setup the associated recordsets to create + privateRecordSets := []*azprivatedns.RecordSet{} + for _, legacyRecordSet := range legacyRecordSets { + recordType := strings.Replace(*legacyRecordSet.Type, "/dnszones/", "/privateDnsZones/", 1) + + // NS not supported in private zones + if strings.TrimPrefix(recordType, "Microsoft.Network/privateDnsZones/") == string(azdns.NS) { + continue + } + + 
privateRecordSet := azprivatedns.RecordSet{ + Name: legacyRecordSet.Name, + RecordSetProperties: &azprivatedns.RecordSetProperties{ + Metadata: legacyRecordSet.Metadata, + TTL: legacyRecordSet.TTL, + Fqdn: legacyRecordSet.Fqdn, + SoaRecord: legacySoaRecordToPrivate(legacyRecordSet.SoaRecord), + CnameRecord: legacyCnameRecordToPrivate(legacyRecordSet.CnameRecord), + MxRecords: legacyMxRecordsToPrivate(legacyRecordSet.MxRecords), + ARecords: legacyARecordsToPrivate(legacyRecordSet.ARecords), + AaaaRecords: legacyAaaaRecordsToPrivate(legacyRecordSet.AaaaRecords), + PtrRecords: legacyPtrRecordsToPrivate(legacyRecordSet.PtrRecords), + SrvRecords: legacySrvRecordsToPrivate(legacyRecordSet.SrvRecords), + TxtRecords: legacyTxtRecordsToPrivate(legacyRecordSet.TxtRecords), + }, + Type: &recordType, + } + privateRecordSets = append(privateRecordSets, &privateRecordSet) + } + + ctx, cancel := context.WithTimeout(context.TODO(), 300*time.Second) + defer cancel() + + // Create/Update the Zone + logrus.Infof("zone: %s ... ", *privateZone.Name) + zoneFuture, err := client.zonesClient.CreateOrUpdate(ctx, client.resourceGroup, *privateZone.Name, privateZone, "", "") + if err != nil { + return err + } + + // Wait for zone creation to complete + err = zoneFuture.WaitForCompletionRef(ctx, client.zonesClient.Client) + if err != nil { + return err + } + + // Read back the newly created zone to verify creation + _, err = client.zonesClient.Get(ctx, client.resourceGroup, *privateZone.Name) + if err != nil { + return err + } + logrus.Info("ok.") + + for _, recordSet := range privateRecordSets { + recordType := azprivatedns.RecordType(strings.TrimPrefix(*recordSet.Type, "Microsoft.Network/privateDnsZones/")) + relativeRecordSetName := *recordSet.Name + recordSet.Type = nil + + // Create/Update the record + logrus.Infof("record: %s %s ... 
", recordType, relativeRecordSetName) + _, err := client.recordsetsClient.CreateOrUpdate(ctx, client.resourceGroup, *privateZone.Name, recordType, relativeRecordSetName, *recordSet, "", "") + if err != nil { + return err + } + + // Read back the newly created record to verify creation + _, err = client.recordsetsClient.Get(ctx, client.resourceGroup, *privateZone.Name, recordType, relativeRecordSetName) + if err != nil { + return err + } + logrus.Info("ok.") + } + + // Do we link, or not? + if link == false || client.virtualNetwork == "" { + return nil + } + + // Get the virtual network so we have some parameters for the link creation + virtualNetwork, err := client.virtualNetworksClient.Get(ctx, client.vnetResourceGroup, client.virtualNetwork, "") + if err != nil { + return err + } + + virtualNetworkLinkName := fmt.Sprintf("%s-network-link", strings.Replace(client.vnetResourceGroup, "-rg", "", 1)) + + virtualNetworkLink := azprivatedns.VirtualNetworkLink{ + Location: to.StringPtr("global"), + VirtualNetworkLinkProperties: &azprivatedns.VirtualNetworkLinkProperties{ + VirtualNetwork: &azprivatedns.SubResource{ + ID: virtualNetwork.ID, + }, + RegistrationEnabled: to.BoolPtr(false), + }, + } + + // Create the virtual network link to DNS + logrus.Infof("link: %s ... 
", virtualNetworkLinkName) + linkFuture, err := client.virtualNetworkLinksClient.CreateOrUpdate(ctx, client.resourceGroup, *privateZone.Name, virtualNetworkLinkName, virtualNetworkLink, "", "") + if err != nil { + return err + } + + // Wait for the link creation to complete + if err = linkFuture.WaitForCompletionRef(ctx, client.virtualNetworkLinksClient.Client); err != nil { + return err + } + + // Read back the newly created link to verify creation + _, err = client.virtualNetworkLinksClient.Get(ctx, client.resourceGroup, *privateZone.Name, virtualNetworkLinkName) + if err != nil { + return err + } + logrus.Info("ok.") + + return nil +} + +// Migrate does a migration from a legacy zone to a private zone +func Migrate(resourceGroup string, migrateZone string, virtualNetwork string, vnetResourceGroup string, link bool) error { + session, err := azconfig.GetSession() + if err != nil { + return err + } + + legacyDNSClient := newLegacyDNSClient(session, resourceGroup) + privateDNSClient := newPrivateDNSClient(session, resourceGroup, virtualNetwork, vnetResourceGroup) + + legacyZone, err := legacyDNSClient.getZone(migrateZone) + if err != nil { + return err + } + + // create new private zone + err = privateDNSClient.migrateLegacyZone(legacyZone, link) + if err != nil { + return err + } + + return nil +} + +// Eligible shows legacy zones that are eligible for migrating to private zones +func Eligible() error { + session, err := azconfig.GetSession() + if err != nil { + return err + } + + legacyDNSClient := newLegacyDNSClient(session, "") + + zones, err := legacyDNSClient.getZones() + if err != nil { + return err + } + + for _, zone := range zones { + logrus.Infof("legacy zone=%s resourceGroup=%s", *zone.Name, idToResourceGroup(*zone.ID)) + } + + return nil +} diff --git a/pkg/rhcos/ami.go b/pkg/rhcos/ami.go index 625ba9e80ac..04cb15db911 100644 --- a/pkg/rhcos/ami.go +++ b/pkg/rhcos/ami.go @@ -1,3 +1,5 @@ +//go:generate go run ami_regions_generate.go rhcos 
../../data/data/rhcos-amd64.json ami_regions.go + package rhcos import ( diff --git a/pkg/rhcos/ami_regions.go b/pkg/rhcos/ami_regions.go new file mode 100644 index 00000000000..aebc16fe8a0 --- /dev/null +++ b/pkg/rhcos/ami_regions.go @@ -0,0 +1,24 @@ +// Code generated by ami_regions_generate.go; DO NOT EDIT. + +package rhcos + +// AMIRegoins is a list of regions where the RHEL CoreOS is published. +var AMIRegions = []string{ + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", +} diff --git a/pkg/rhcos/ami_regions_generate.go b/pkg/rhcos/ami_regions_generate.go new file mode 100644 index 00000000000..7495cc42002 --- /dev/null +++ b/pkg/rhcos/ami_regions_generate.go @@ -0,0 +1,83 @@ +// +build tools + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "text/template" +) + +func main() { + if len(os.Args) != 4 { + log.Fatalln("exactly 4 arguments must be provided") + } + argsWithoutProg := os.Args[1:] + + pkg := argsWithoutProg[0] + srcPath, err := filepath.Abs(argsWithoutProg[1]) + log.Println("srcPath: ", srcPath) + if err != nil { + log.Fatalln("failed to load absolute path for the source") + } + dstPath, err := filepath.Abs(argsWithoutProg[2]) + log.Println("dstPath: ", dstPath) + if err != nil { + log.Fatalln("failed to load absolute path for the source") + } + + srcData, err := ioutil.ReadFile(srcPath) + if err != nil { + log.Fatalln(err) + } + + var m metadata + if err := json.Unmarshal(srcData, &m); err != nil { + log.Fatalln(fmt.Errorf("failed to unmarshal source: %v", err)) + } + + regions := make([]string, 0, len(m.AMIs)) + for region := range m.AMIs { + regions = append(regions, region) + } + sort.Strings(regions) + + tinput := struct { + Pkg string + Regions 
[]string + }{Pkg: pkg, Regions: regions} + + t := template.Must(template.New("ami_regions").Parse(tmpl)) + buf := &bytes.Buffer{} + if err := t.Execute(buf, tinput); err != nil { + log.Fatalln(fmt.Errorf("failed to execute the template: %v", err)) + } + + if err := ioutil.WriteFile(dstPath, buf.Bytes(), 0664); err != nil { + log.Fatalln(err) + } +} + +type metadata struct { + AMIs map[string]struct { + HVM string `json:"hvm"` + } `json:"amis"` +} + +var tmpl = `// Code generated by ami_regions_generate.go; DO NOT EDIT. + +package {{ .Pkg }} + +// AMIRegoins is a list of regions where the RHEL CoreOS is published. +var AMIRegions = []string{ +{{- range $region := .Regions}} + "{{ $region }}", +{{- end}} +} +` diff --git a/pkg/terraform/exec/plugins/vsphereprivate/config.go b/pkg/terraform/exec/plugins/vsphereprivate/config.go index 8c61fb35e5d..f710d5d65ac 100644 --- a/pkg/terraform/exec/plugins/vsphereprivate/config.go +++ b/pkg/terraform/exec/plugins/vsphereprivate/config.go @@ -1,6 +1,7 @@ package vsphereprivate import ( + "context" "fmt" "log" "net/url" @@ -8,11 +9,16 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-vsphere/vsphere" "github.com/vmware/govmomi" + "github.com/vmware/govmomi/vapi/rest" ) // VSphereClient - The VIM/govmomi client. 
type VSphereClient struct { + // vim client vimClient *govmomi.Client + + // rest client for tags + restClient *rest.Client } // ConfigWrapper - wrapping the terraform-provider-vsphere Config struct @@ -60,6 +66,13 @@ func (cw *ConfigWrapper) Client() (*VSphereClient, error) { if err != nil { return nil, err } + ctx, cancel := context.WithTimeout(context.TODO(), defaultAPITimeout) + defer cancel() + + client.restClient, err = cw.config.SavedRestSessionOrNew(ctx, client.vimClient) + if err != nil { + return nil, err + } log.Printf("[DEBUG] VMWare vSphere Client configured for URL: %s", cw.config.VSphereServer) diff --git a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go index 7f626743aef..d1af3bca789 100644 --- a/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go +++ b/pkg/terraform/exec/plugins/vsphereprivate/resource_vsphereprivate_import_ova.go @@ -12,6 +12,7 @@ import ( "github.com/vmware/govmomi/nfc" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/ovf" + "github.com/vmware/govmomi/vapi/tags" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" @@ -76,6 +77,13 @@ func resourceVSpherePrivateImportOva() *schema.Resource { ForceNew: true, ValidateFunc: validation.NoZeroValues, }, + "tag": { + Type: schema.TypeString, + Description: "The name of the tag to attach the virtual machine in.", + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, }, } } @@ -90,7 +98,7 @@ type importOvaParams struct { Folder *object.Folder } -func findImportOvaParams(client *vim25.Client, datacenter, cluster, datastore, network string) (*importOvaParams, error) { +func findImportOvaParams(client *vim25.Client, datacenter, cluster, datastore, network, folder string) (*importOvaParams, error) { var ccrMo mo.ClusterComputeResource ctx, cancel := 
context.WithTimeout(context.TODO(), defaultAPITimeout) @@ -106,15 +114,13 @@ func findImportOvaParams(client *vim25.Client, datacenter, cluster, datastore, n } importOvaParams.Datacenter = dcObj - // Find the top-level (and hidden to view) folders in the - // datacenter - folders, err := importOvaParams.Datacenter.Folders(ctx) + // Find the newly created folder based on the path + // provided + folderObj, err := finder.Folder(ctx, folder) if err != nil { return nil, err } - // The only folder we are interested in is VmFolder - // Which can contain our template - importOvaParams.Folder = folders.VmFolder + importOvaParams.Folder = folderObj clusterPath := fmt.Sprintf("/%s/host/%s", datacenter, cluster) @@ -215,6 +221,23 @@ func findImportOvaParams(client *vim25.Client, datacenter, cluster, datastore, n return importOvaParams, nil } +func attachTag(d *schema.ResourceData, meta interface{}) error { + ctx, cancel := context.WithTimeout(context.TODO(), defaultAPITimeout) + defer cancel() + tagManager := tags.NewManager(meta.(*VSphereClient).restClient) + moRef := types.ManagedObjectReference{ + Value: d.Id(), + Type: "VirtualMachine", + } + + err := tagManager.AttachTag(ctx, d.Get("tag").(string), moRef) + + if err != nil { + return err + } + return nil +} + // Used govc/importx/ovf.go as an example to implement // resourceVspherePrivateImportOvaCreate and upload functions // See: https://github.com/vmware/govmomi/blob/cc10a0758d5b4d4873388bcea417251d1ad03e42/govc/importx/ovf.go#L196-L324 @@ -246,7 +269,8 @@ func resourceVSpherePrivateImportOvaCreate(d *schema.ResourceData, meta interfac d.Get("datacenter").(string), d.Get("cluster").(string), d.Get("datastore").(string), - d.Get("network").(string)) + d.Get("network").(string), + d.Get("folder").(string)) if err != nil { return errors.Errorf("failed to find provided vSphere objects: %s", err) } @@ -333,6 +357,11 @@ func resourceVSpherePrivateImportOvaCreate(d *schema.ResourceData, meta interfac } 
d.SetId(info.Entity.Value) + + err = attachTag(d, meta) + if err != nil { + return errors.Errorf("failed to attach tag to virtual machine: %s", err) + } log.Printf("[DEBUG] %s: ova import complete", d.Get("name").(string)) return resourceVSpherePrivateImportOvaRead(d, meta) diff --git a/pkg/tfvars/aws/aws.go b/pkg/tfvars/aws/aws.go index 815337d7053..04de2b5a202 100644 --- a/pkg/tfvars/aws/aws.go +++ b/pkg/tfvars/aws/aws.go @@ -8,12 +8,16 @@ import ( "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1beta1" + configaws "github.com/openshift/installer/pkg/asset/installconfig/aws" "github.com/openshift/installer/pkg/types" + typesaws "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/aws/defaults" ) type config struct { AMI string `json:"aws_ami"` + AMIRegion string `json:"aws_ami_region"` + CustomEndpoints map[string]string `json:"custom_endpoints,omitempty"` ExtraTags map[string]string `json:"aws_extra_tags,omitempty"` BootstrapInstanceType string `json:"aws_bootstrap_instance_type,omitempty"` MasterInstanceType string `json:"aws_master_instance_type,omitempty"` @@ -29,15 +33,19 @@ type config struct { PrivateSubnets []string `json:"aws_private_subnets,omitempty"` PublicSubnets *[]string `json:"aws_public_subnets,omitempty"` PublishStrategy string `json:"aws_publish_strategy,omitempty"` + SkipRegionCheck bool `json:"aws_skip_region_validation"` } // TFVarsSources contains the parameters to be converted into Terraform variables type TFVarsSources struct { VPC string PrivateSubnets, PublicSubnets []string + Services []typesaws.ServiceEndpoint Publish types.PublishingStrategy + AMIID, AMIRegion string + MasterConfigs, WorkerConfigs []*v1beta1.AWSMachineProviderConfig } @@ -45,6 +53,12 @@ type TFVarsSources struct { func TFVars(sources TFVarsSources) ([]byte, error) { masterConfig := sources.MasterConfigs[0] + endpoints := make(map[string]string) + for _, service := range sources.Services { + 
service := service + endpoints[service.Name] = service.URL + } + tags := make(map[string]string, len(masterConfig.Tags)) for _, tag := range masterConfig.Tags { tags[tag.Name] = tag.Value @@ -89,9 +103,9 @@ func TFVars(sources TFVarsSources) ([]byte, error) { instanceClass := defaults.InstanceClass(masterConfig.Placement.Region) cfg := &config{ + CustomEndpoints: endpoints, Region: masterConfig.Placement.Region, ExtraTags: tags, - AMI: *masterConfig.AMI.ID, MasterAvailabilityZones: masterAvailabilityZones, WorkerAvailabilityZones: workerAvailabilityZones, BootstrapInstanceType: fmt.Sprintf("%s.large", instanceClass), @@ -101,6 +115,7 @@ func TFVars(sources TFVarsSources) ([]byte, error) { VPC: sources.VPC, PrivateSubnets: sources.PrivateSubnets, PublishStrategy: string(sources.Publish), + SkipRegionCheck: !configaws.IsKnownRegion(masterConfig.Placement.Region), } if len(sources.PublicSubnets) == 0 { @@ -125,5 +140,13 @@ func TFVars(sources TFVarsSources) ([]byte, error) { cfg.KMSKeyID = *rootVolume.EBS.KMSKey.ARN } + if masterConfig.AMI.ID != nil && *masterConfig.AMI.ID != "" { + cfg.AMI = *masterConfig.AMI.ID + cfg.AMIRegion = masterConfig.Placement.Region + } else { + cfg.AMI = sources.AMIID + cfg.AMIRegion = sources.AMIRegion + } + return json.MarshalIndent(cfg, "", " ") } diff --git a/pkg/tfvars/azure/azure.go b/pkg/tfvars/azure/azure.go index a82c4690566..f77fe97cb69 100644 --- a/pkg/tfvars/azure/azure.go +++ b/pkg/tfvars/azure/azure.go @@ -2,7 +2,6 @@ package azure import ( "encoding/json" - "net" "os" "github.com/Azure/go-autorest/autorest/to" @@ -37,8 +36,6 @@ type config struct { ComputeSubnet string `json:"azure_compute_subnet"` PreexistingNetwork bool `json:"azure_preexisting_network"` Private bool `json:"azure_private"` - MachineV4CIDRs []string `json:"azure_machine_v4_cidrs"` - MachineV6CIDRs []string `json:"azure_machine_v6_cidrs"` EmulateSingleStackIPv6 bool `json:"azure_emulate_single_stack_ipv6"` } @@ -51,9 +48,6 @@ type TFVarsSources struct { 
ImageURL string PreexistingNetwork bool Publish types.PublishingStrategy - - MachineV4CIDRs []net.IPNet - MachineV6CIDRs []net.IPNet } // TFVars generates Azure-specific Terraform variables launching the cluster. @@ -68,14 +62,6 @@ func TFVars(sources TFVarsSources) ([]byte, error) { masterAvailabilityZones[i] = to.String(c.Zone) } - machineV4CIDRStrings, machineV6CIDRStrings := []string{}, []string{} - for _, ipnet := range sources.MachineV4CIDRs { - machineV4CIDRStrings = append(machineV4CIDRStrings, ipnet.String()) - } - for _, ipnet := range sources.MachineV6CIDRs { - machineV6CIDRStrings = append(machineV6CIDRStrings, ipnet.String()) - } - var emulateSingleStackIPv6 bool if os.Getenv("OPENSHIFT_INSTALL_AZURE_EMULATE_SINGLESTACK_IPV6") == "true" { emulateSingleStackIPv6 = true @@ -97,8 +83,6 @@ func TFVars(sources TFVarsSources) ([]byte, error) { ControlPlaneSubnet: masterConfig.Subnet, ComputeSubnet: workerConfig.Subnet, PreexistingNetwork: sources.PreexistingNetwork, - MachineV4CIDRs: machineV4CIDRStrings, - MachineV6CIDRs: machineV6CIDRStrings, EmulateSingleStackIPv6: emulateSingleStackIPv6, } diff --git a/pkg/tfvars/baremetal/baremetal.go b/pkg/tfvars/baremetal/baremetal.go index 5e76ecb1134..da2b9ede117 100644 --- a/pkg/tfvars/baremetal/baremetal.go +++ b/pkg/tfvars/baremetal/baremetal.go @@ -22,6 +22,8 @@ type config struct { BootstrapOSImage string `json:"bootstrap_os_image,omitempty"` ExternalBridge string `json:"external_bridge,omitempty"` ProvisioningBridge string `json:"provisioning_bridge,omitempty"` + IgnitionURL string `json:"ignition_url,omitempty"` + IgnitionURLCACert string `json:"ignition_url_ca_cert,omitempty"` // Data required for control plane deployment - several maps per host, because of terraform's limitations Hosts []map[string]interface{} `json:"hosts"` @@ -32,7 +34,7 @@ type config struct { } // TFVars generates bare metal specific Terraform variables. 
-func TFVars(libvirtURI, bootstrapProvisioningIP, bootstrapOSImage, externalBridge, provisioningBridge string, platformHosts []*baremetal.Host, image string) ([]byte, error) { +func TFVars(libvirtURI, bootstrapProvisioningIP, bootstrapOSImage, externalBridge, provisioningBridge string, platformHosts []*baremetal.Host, image string, ignitionURL string, ignitionURLCACert string) ([]byte, error) { bootstrapOSImage, err := cache.DownloadImageFile(bootstrapOSImage) if err != nil { return nil, errors.Wrap(err, "failed to use cached bootstrap libvirt image") @@ -132,6 +134,8 @@ func TFVars(libvirtURI, bootstrapProvisioningIP, bootstrapOSImage, externalBridg DriverInfos: driverInfos, RootDevices: rootDevices, InstanceInfos: instanceInfos, + IgnitionURL: ignitionURL, + IgnitionURLCACert: ignitionURLCACert, } return json.MarshalIndent(cfg, "", " ") diff --git a/pkg/tfvars/libvirt/libvirt.go b/pkg/tfvars/libvirt/libvirt.go index e4e5e4927c8..73c0af2afe6 100644 --- a/pkg/tfvars/libvirt/libvirt.go +++ b/pkg/tfvars/libvirt/libvirt.go @@ -10,21 +10,23 @@ import ( "github.com/apparentlymart/go-cidr/cidr" "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" "github.com/openshift/installer/pkg/tfvars/internal/cache" + "github.com/openshift/installer/pkg/types" "github.com/pkg/errors" ) type config struct { - URI string `json:"libvirt_uri,omitempty"` - Image string `json:"os_image,omitempty"` - IfName string `json:"libvirt_network_if"` - MasterIPs []string `json:"libvirt_master_ips,omitempty"` - BootstrapIP string `json:"libvirt_bootstrap_ip,omitempty"` - MasterMemory string `json:"libvirt_master_memory,omitempty"` - MasterVcpu string `json:"libvirt_master_vcpu,omitempty"` + URI string `json:"libvirt_uri,omitempty"` + Image string `json:"os_image,omitempty"` + IfName string `json:"libvirt_network_if"` + MasterIPs []string `json:"libvirt_master_ips,omitempty"` + BootstrapIP string `json:"libvirt_bootstrap_ip,omitempty"` + MasterMemory string 
`json:"libvirt_master_memory,omitempty"` + MasterVcpu string `json:"libvirt_master_vcpu,omitempty"` + BootstrapMemory int `json:"libvirt_bootstrap_memory,omitempty"` } // TFVars generates libvirt-specific Terraform variables. -func TFVars(masterConfig *v1beta1.LibvirtMachineProviderConfig, osImage string, machineCIDR *net.IPNet, bridge string, masterCount int) ([]byte, error) { +func TFVars(masterConfig *v1beta1.LibvirtMachineProviderConfig, osImage string, machineCIDR *net.IPNet, bridge string, masterCount int, architecture types.Architecture) ([]byte, error) { bootstrapIP, err := cidr.Host(machineCIDR, 10) if err != nil { return nil, errors.Errorf("failed to generate bootstrap IP: %v", err) @@ -50,6 +52,12 @@ func TFVars(masterConfig *v1beta1.LibvirtMachineProviderConfig, osImage string, MasterVcpu: strconv.Itoa(masterConfig.DomainVcpu), } + // Power PC systems typically require more memory because the page size is 64K and not the default 4K + // TODO: need to make ppc64le a supported architecture - https://bugzilla.redhat.com/show_bug.cgi?id=1821392 + if architecture == "ppc64le" { + cfg.BootstrapMemory = 5120 + } + return json.MarshalIndent(cfg, "", " ") } diff --git a/pkg/tfvars/openstack/openstack.go b/pkg/tfvars/openstack/openstack.go index 86774146a81..b14627e448d 100644 --- a/pkg/tfvars/openstack/openstack.go +++ b/pkg/tfvars/openstack/openstack.go @@ -11,34 +11,41 @@ import ( "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" "github.com/gophercloud/utils/openstack/clientconfig" "github.com/openshift/installer/pkg/rhcos" "github.com/openshift/installer/pkg/tfvars/internal/cache" + types_openstack "github.com/openshift/installer/pkg/types/openstack" 
"github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" ) type config struct { - BaseImageName string `json:"openstack_base_image_name,omitempty"` - ExternalNetwork string `json:"openstack_external_network,omitempty"` - Cloud string `json:"openstack_credentials_cloud,omitempty"` - FlavorName string `json:"openstack_master_flavor_name,omitempty"` - LbFloatingIP string `json:"openstack_lb_floating_ip,omitempty"` - APIVIP string `json:"openstack_api_int_ip,omitempty"` - DNSVIP string `json:"openstack_node_dns_ip,omitempty"` - IngressVIP string `json:"openstack_ingress_ip,omitempty"` - TrunkSupport string `json:"openstack_trunk_support,omitempty"` - OctaviaSupport string `json:"openstack_octavia_support,omitempty"` - RootVolumeSize int `json:"openstack_master_root_volume_size,omitempty"` - RootVolumeType string `json:"openstack_master_root_volume_type,omitempty"` - BootstrapShim string `json:"openstack_bootstrap_shim_ignition,omitempty"` - ExternalDNS []string `json:"openstack_external_dns,omitempty"` - MasterServerGroupID string `json:"openstack_master_server_group_id,omitempty"` + BaseImageName string `json:"openstack_base_image_name,omitempty"` + ExternalNetwork string `json:"openstack_external_network,omitempty"` + Cloud string `json:"openstack_credentials_cloud,omitempty"` + FlavorName string `json:"openstack_master_flavor_name,omitempty"` + LbFloatingIP string `json:"openstack_lb_floating_ip,omitempty"` + APIVIP string `json:"openstack_api_int_ip,omitempty"` + DNSVIP string `json:"openstack_node_dns_ip,omitempty"` + IngressVIP string `json:"openstack_ingress_ip,omitempty"` + TrunkSupport string `json:"openstack_trunk_support,omitempty"` + OctaviaSupport string `json:"openstack_octavia_support,omitempty"` + RootVolumeSize int `json:"openstack_master_root_volume_size,omitempty"` + RootVolumeType string `json:"openstack_master_root_volume_type,omitempty"` + BootstrapShim string 
`json:"openstack_bootstrap_shim_ignition,omitempty"` + ExternalDNS []string `json:"openstack_external_dns,omitempty"` + MasterServerGroupID string `json:"openstack_master_server_group_id,omitempty"` + AdditionalNetworkIDs []string `json:"openstack_additional_network_ids,omitempty"` + AdditionalSecurityGroupIDs []string `json:"openstack_master_extra_sg_ids,omitempty"` + MachinesSubnet string `json:"openstack_machines_subnet_id,omitempty"` + MachinesNetwork string `json:"openstack_machines_network_id,omitempty"` } // TFVars generates OpenStack-specific Terraform variables. -func TFVars(masterConfig *v1alpha1.OpenstackProviderSpec, cloud string, externalNetwork string, externalDNS []string, lbFloatingIP string, apiVIP string, dnsVIP string, ingressVIP string, trunkSupport string, octaviaSupport string, baseImage string, infraID string, userCA string, bootstrapIgn string) ([]byte, error) { +func TFVars(masterConfig *v1alpha1.OpenstackProviderSpec, cloud string, externalNetwork string, externalDNS []string, lbFloatingIP string, apiVIP string, dnsVIP string, ingressVIP string, trunkSupport string, octaviaSupport string, baseImage string, infraID string, userCA string, bootstrapIgn string, mpool *types_openstack.MachinePool, machinesSubnet string) ([]byte, error) { cfg := &config{ ExternalNetwork: externalNetwork, @@ -51,6 +58,7 @@ func TFVars(masterConfig *v1alpha1.OpenstackProviderSpec, cloud string, external ExternalDNS: externalDNS, TrunkSupport: trunkSupport, OctaviaSupport: octaviaSupport, + MachinesSubnet: machinesSubnet, } // Normally baseImage contains a URL that we will use to create a new Glance image, but for testing @@ -126,6 +134,36 @@ func TFVars(masterConfig *v1alpha1.OpenstackProviderSpec, cloud string, external cfg.MasterServerGroupID = masterConfig.ServerGroupID + cfg.AdditionalNetworkIDs = []string{} + if mpool.AdditionalNetworkIDs != nil { + for _, networkID := range mpool.AdditionalNetworkIDs { + cfg.AdditionalNetworkIDs = 
append(cfg.AdditionalNetworkIDs, networkID) + } + } + + cfg.AdditionalSecurityGroupIDs = []string{} + if mpool.AdditionalSecurityGroupIDs != nil { + for _, sgID := range mpool.AdditionalSecurityGroupIDs { + cfg.AdditionalSecurityGroupIDs = append(cfg.AdditionalSecurityGroupIDs, sgID) + } + } + + if machinesSubnet != "" { + cfg.MachinesNetwork, err = getNetworkFromSubnet(cloud, machinesSubnet) + if err != nil { + return nil, err + } + + // Make sure that the network has the primary cluster network tag. + // In the case of multiple networks this tag is required for + // cluster-api-provider-openstack to define which ip address to set as + // the primary one. + err = setNetworkTag(cloud, cfg.MachinesNetwork, infraID+"-primaryClusterNetwork") + if err != nil { + return nil, err + } + } + return json.MarshalIndent(cfg, "", " ") } @@ -218,3 +256,41 @@ func getServiceCatalog(cloud string) (*tokens.ServiceCatalog, error) { return serviceCatalog, nil } + +// getNetworkFromSubnet looks up a subnet in openstack and returns the ID of the network it's a part of +func getNetworkFromSubnet(cloud string, subnetID string) (string, error) { + opts := &clientconfig.ClientOpts{ + Cloud: cloud, + } + + networkClient, err := clientconfig.NewServiceClient("network", opts) + if err != nil { + return "", err + } + + subnet, err := subnets.Get(networkClient, subnetID).Extract() + if err != nil { + return "", err + } + + return subnet.NetworkID, nil +} + +// setNetworkTag sets a tag for the network +func setNetworkTag(cloud string, networkID string, networkTag string) error { + opts := &clientconfig.ClientOpts{ + Cloud: cloud, + } + + networkClient, err := clientconfig.NewServiceClient("network", opts) + if err != nil { + return err + } + + err = attributestags.Add(networkClient, "networks", networkID, networkTag).ExtractErr() + if err != nil { + return err + } + + return nil +} diff --git a/pkg/tfvars/openstack/rhcos_image.go b/pkg/tfvars/openstack/rhcos_image.go index 
bc91e4fa3eb..3b7e272ba95 100644 --- a/pkg/tfvars/openstack/rhcos_image.go +++ b/pkg/tfvars/openstack/rhcos_image.go @@ -50,11 +50,12 @@ func uploadBaseImage(cloud string, localFilePath string, imageName string, clust // FIXME(mfedosin): We have to temporary disable image import, because it looks // like there are problems on the server side. // Revert this patch when the problems are fixed. - useImageImport := false + // https://github.com/openshift/installer/issues/3403 // useImageImport, err := isImageImportSupported(cloud) // if err != nil { // return err // } + useImageImport := false if useImageImport { logrus.Debugf("Using Image Import API to upload RHCOS to the image %q with ID %q", img.Name, img.ID) diff --git a/pkg/tfvars/tfvars.go b/pkg/tfvars/tfvars.go index 0796a806cec..8b504a13316 100644 --- a/pkg/tfvars/tfvars.go +++ b/pkg/tfvars/tfvars.go @@ -3,18 +3,16 @@ package tfvars import ( "encoding/json" - "net" "strings" ) type config struct { - ClusterID string `json:"cluster_id,omitempty"` - ClusterDomain string `json:"cluster_domain,omitempty"` - BaseDomain string `json:"base_domain,omitempty"` - // DeprecatedMachineCIDR has been replaced with machine_v4_cidrs, use the first - // entry from there instead. - DeprecatedMachineCIDR string `json:"machine_cidr"` - Masters int `json:"master_count,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + ClusterDomain string `json:"cluster_domain,omitempty"` + BaseDomain string `json:"base_domain,omitempty"` + Masters int `json:"master_count,omitempty"` + MachineV4CIDRs []string `json:"machine_v4_cidrs"` + MachineV6CIDRs []string `json:"machine_v6_cidrs"` UseIPv4 bool `json:"use_ipv4"` UseIPv6 bool `json:"use_ipv6"` @@ -24,17 +22,18 @@ type config struct { } // TFVars generates terraform.tfvar JSON for launching the cluster. 
-func TFVars(clusterID string, clusterDomain string, baseDomain string, deprecatedMachineCIDR *net.IPNet, useIPv4, useIPv6 bool, bootstrapIgn string, masterIgn string, masterCount int) ([]byte, error) { +func TFVars(clusterID string, clusterDomain string, baseDomain string, machineV4CIDRs []string, machineV6CIDRs []string, useIPv4, useIPv6 bool, bootstrapIgn string, masterIgn string, masterCount int) ([]byte, error) { config := &config{ - ClusterID: clusterID, - ClusterDomain: strings.TrimSuffix(clusterDomain, "."), - BaseDomain: strings.TrimSuffix(baseDomain, "."), - DeprecatedMachineCIDR: deprecatedMachineCIDR.String(), - UseIPv4: useIPv4, - UseIPv6: useIPv6, - Masters: masterCount, - IgnitionBootstrap: bootstrapIgn, - IgnitionMaster: masterIgn, + ClusterID: clusterID, + ClusterDomain: strings.TrimSuffix(clusterDomain, "."), + BaseDomain: strings.TrimSuffix(baseDomain, "."), + MachineV4CIDRs: machineV4CIDRs, + MachineV6CIDRs: machineV6CIDRs, + UseIPv4: useIPv4, + UseIPv6: useIPv6, + Masters: masterCount, + IgnitionBootstrap: bootstrapIgn, + IgnitionMaster: masterIgn, } return json.MarshalIndent(config, "", " ") diff --git a/pkg/tfvars/vsphere/vsphere.go b/pkg/tfvars/vsphere/vsphere.go index d2ded8b7a04..8ab85e01a8e 100644 --- a/pkg/tfvars/vsphere/vsphere.go +++ b/pkg/tfvars/vsphere/vsphere.go @@ -3,7 +3,7 @@ package vsphere import ( "encoding/json" - vsphereapis "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1" + vsphereapis "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" "github.com/pkg/errors" "github.com/openshift/installer/pkg/tfvars/internal/cache" diff --git a/pkg/types/aws/metadata.go b/pkg/types/aws/metadata.go index 08d18257a9f..300077f037f 100644 --- a/pkg/types/aws/metadata.go +++ b/pkg/types/aws/metadata.go @@ -4,6 +4,12 @@ package aws type Metadata struct { Region string `json:"region"` + // ServiceEndpoints list contains custom endpoints which will override default + // service endpoint of AWS 
Services. + // There must be only one ServiceEndpoint for a service. + // +optional + ServiceEndpoints []ServiceEndpoint `json:"serviceEndpoints,omitempty"` + // Identifier holds a slice of filter maps. The maps hold the // key/value pairs for the tags we will be matching against. A // resource matches the map if all of the key/value pairs are in its diff --git a/pkg/types/aws/platform.go b/pkg/types/aws/platform.go index 45d0911ec51..e78533f050a 100644 --- a/pkg/types/aws/platform.go +++ b/pkg/types/aws/platform.go @@ -21,9 +21,28 @@ type Platform struct { // +optional UserTags map[string]string `json:"userTags,omitempty"` + // ServiceEndpoints list contains custom endpoints which will override default + // service endpoint of AWS Services. + // There must be only one ServiceEndpoint for a service. + // +optional + ServiceEndpoints []ServiceEndpoint `json:"serviceEndpoints,omitempty"` + // DefaultMachinePlatform is the default configuration used when // installing on AWS for machine pools which do not define their own // platform configuration. // +optional DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"` } + +// ServiceEndpoint store the configuration for services to +// override existing defaults of AWS Services. +type ServiceEndpoint struct { + // Name is the name of the AWS service. + // This must be provided and cannot be empty. + Name string `json:"name"` + + // URL is fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. 
+ URL string `json:"url"` +} diff --git a/pkg/types/aws/validation/platform.go b/pkg/types/aws/validation/platform.go index 183bfb13e8f..64bdf4d11e0 100644 --- a/pkg/types/aws/validation/platform.go +++ b/pkg/types/aws/validation/platform.go @@ -1,63 +1,71 @@ package validation import ( - "sort" + "fmt" + "net/url" + "regexp" "k8s.io/apimachinery/pkg/util/validation/field" "github.com/openshift/installer/pkg/types/aws" ) -var ( - // Regions is a map of the known AWS regions. The key of the map is - // the short name of the region. The value of the map is the long - // name of the region. - Regions = map[string]string{ - //"ap-east-1": "Hong Kong", - "ap-northeast-1": "Tokyo", - "ap-northeast-2": "Seoul", - //"ap-northeast-3": "Osaka-Local", - "ap-south-1": "Mumbai", - "ap-southeast-1": "Singapore", - "ap-southeast-2": "Sydney", - "ca-central-1": "Central", - //"cn-north-1": "Beijing", - //"cn-northwest-1": "Ningxia", - "eu-central-1": "Frankfurt", - "eu-north-1": "Stockholm", - "eu-west-1": "Ireland", - "eu-west-2": "London", - "eu-west-3": "Paris", - "me-south-1": "Bahrain", - "sa-east-1": "São Paulo", - "us-east-1": "N. Virginia", - "us-east-2": "Ohio", - //"us-gov-east-1": "AWS GovCloud (US-East)", - //"us-gov-west-1": "AWS GovCloud (US-West)", - "us-west-1": "N. California", - "us-west-2": "Oregon", - } - - validRegionValues = func() []string { - validValues := make([]string, len(Regions)) - i := 0 - for r := range Regions { - validValues[i] = r - i++ - } - sort.Strings(validValues) - return validValues - }() -) - // ValidatePlatform checks that the specified platform is valid. 
func ValidatePlatform(p *aws.Platform, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if _, ok := Regions[p.Region]; !ok { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("region"), p.Region, validRegionValues)) + + if p.Region == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("region"), "region must be specified")) } + + allErrs = append(allErrs, validateServiceEndpoints(p.ServiceEndpoints, fldPath.Child("serviceEndpoints"))...) + if p.DefaultMachinePlatform != nil { allErrs = append(allErrs, ValidateMachinePool(p, p.DefaultMachinePlatform, fldPath.Child("defaultMachinePlatform"))...) } return allErrs } + +func validateServiceEndpoints(endpoints []aws.ServiceEndpoint, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + tracker := map[string]int{} + for idx, e := range endpoints { + fldp := fldPath.Index(idx) + if eidx, ok := tracker[e.Name]; ok { + allErrs = append(allErrs, field.Invalid(fldp.Child("name"), e.Name, fmt.Sprintf("duplicate service endpoint not allowed for %s, service endpoint already defined at %s", e.Name, fldPath.Index(eidx)))) + } else { + tracker[e.Name] = idx + } + + if err := validateServiceURL(e.URL); err != nil { + allErrs = append(allErrs, field.Invalid(fldp.Child("url"), e.URL, err.Error())) + } + } + return allErrs +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +func validateServiceURL(uri string) error { + endpoint := uri + if !schemeRE.MatchString(endpoint) { + scheme := "https" + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + u, err := url.Parse(endpoint) + if err != nil { + return err + } + if u.Hostname() == "" { + return fmt.Errorf("host cannot be empty, empty host provided") + } + if s := u.Scheme; s != "https" { + return fmt.Errorf("invalid scheme %s, only https allowed", s) + } + if r := u.RequestURI(); r != "/" { + return fmt.Errorf("no path or request parameters must be provided, %q was provided", r) + } + + return nil +} diff --git 
a/pkg/types/aws/validation/platform_test.go b/pkg/types/aws/validation/platform_test.go index 957cc38f81e..3626d7e0f36 100644 --- a/pkg/types/aws/validation/platform_test.go +++ b/pkg/types/aws/validation/platform_test.go @@ -13,21 +13,90 @@ func TestValidatePlatform(t *testing.T) { cases := []struct { name string platform *aws.Platform - valid bool + expected string }{ { name: "minimal", platform: &aws.Platform{ Region: "us-east-1", }, - valid: true, }, { name: "invalid region", platform: &aws.Platform{ - Region: "bad-region", + Region: "", }, - valid: false, + expected: `^test-path\.region: Required value: region must be specified$`, + }, + { + name: "invalid url for service endpoint", + platform: &aws.Platform{ + Region: "us-east-1", + ServiceEndpoints: []aws.ServiceEndpoint{{ + Name: "ec2", + URL: "/path/some", + }}, + }, + expected: `^test-path\.serviceEndpoints\[0\]\.url: Invalid value: "(.*)": host cannot be empty, empty host provided$`, + }, + { + name: "invalid url for service endpoint", + platform: &aws.Platform{ + Region: "us-east-1", + ServiceEndpoints: []aws.ServiceEndpoint{{ + Name: "ec2", + URL: "https://test-ec2.random.local/path/some", + }}, + }, + expected: `^test-path\.serviceEndpoints\[0\]\.url: Invalid value: "(.*)": no path or request parameters must be provided, "/path/some" was provided$`, + }, + { + name: "invalid url for service endpoint", + platform: &aws.Platform{ + Region: "us-east-1", + ServiceEndpoints: []aws.ServiceEndpoint{{ + Name: "ec2", + URL: "https://test-ec2.random.local?foo=some", + }}, + }, + expected: `^test-path\.serviceEndpoints\[0\]\.url: Invalid value: "(.*)": no path or request parameters must be provided, "/\?foo=some" was provided$`, + }, + { + name: "valid url for service endpoint", + platform: &aws.Platform{ + Region: "us-east-1", + ServiceEndpoints: []aws.ServiceEndpoint{{ + Name: "ec2", + URL: "test-ec2.random.local", + }}, + }, + }, + { + name: "valid url for service endpoint", + platform: &aws.Platform{ + 
Region: "us-east-1", + ServiceEndpoints: []aws.ServiceEndpoint{{ + Name: "ec2", + URL: "https://test-ec2.random.local", + }}, + }, + }, + { + name: "duplicate service endpoints", + platform: &aws.Platform{ + Region: "us-east-1", + ServiceEndpoints: []aws.ServiceEndpoint{{ + Name: "ec2", + URL: "test-ec2.random.local", + }, { + Name: "s3", + URL: "test-ec2.random.local", + }, { + Name: "ec2", + URL: "test-ec2.random.local", + }}, + }, + expected: `^test-path\.serviceEndpoints\[2\]\.name: Invalid value: "ec2": duplicate service endpoint not allowed for ec2, service endpoint already defined at test-path\.serviceEndpoints\[0\]$`, }, { name: "valid machine pool", @@ -35,7 +104,6 @@ func TestValidatePlatform(t *testing.T) { Region: "us-east-1", DefaultMachinePlatform: &aws.MachinePool{}, }, - valid: true, }, { name: "invalid machine pool", @@ -47,16 +115,16 @@ func TestValidatePlatform(t *testing.T) { }, }, }, - valid: false, + expected: `^test-path\.defaultMachinePlatform\.iops: Invalid value: -10: Storage IOPS must be positive$`, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { err := ValidatePlatform(tc.platform, field.NewPath("test-path")).ToAggregate() - if tc.valid { + if tc.expected == "" { assert.NoError(t, err) } else { - assert.Error(t, err) + assert.Regexp(t, tc.expected, err) } }) } diff --git a/pkg/types/baremetal/validation/platform.go b/pkg/types/baremetal/validation/platform.go index d48acd7dee9..c13642308a9 100644 --- a/pkg/types/baremetal/validation/platform.go +++ b/pkg/types/baremetal/validation/platform.go @@ -8,8 +8,11 @@ import ( "reflect" "strings" + "github.com/apparentlymart/go-cidr/cidr" "github.com/go-playground/validator/v10" + "github.com/pkg/errors" + "github.com/openshift/installer/pkg/ipnet" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/baremetal" "github.com/openshift/installer/pkg/validate" @@ -42,6 +45,44 @@ func validateIPNotinMachineCIDR(ip string, n *types.Networking) error { 
return nil } +func validateNoOverlapMachineCIDR(target *net.IPNet, n *types.Networking) error { + allIPv4 := ipnet.MustParseCIDR("0.0.0.0/0") + allIPv6 := ipnet.MustParseCIDR("::/0") + netIsIPv6 := target.IP.To4() == nil + + for _, machineCIDR := range n.MachineNetwork { + machineCIDRisIPv6 := machineCIDR.CIDR.IP.To4() == nil + + // Only compare if both are the same IP version + if netIsIPv6 == machineCIDRisIPv6 { + var err error + if netIsIPv6 { + err = cidr.VerifyNoOverlap( + []*net.IPNet{ + target, + &machineCIDR.CIDR.IPNet, + }, + &allIPv6.IPNet, + ) + } else { + err = cidr.VerifyNoOverlap( + []*net.IPNet{ + target, + &machineCIDR.CIDR.IPNet, + }, + &allIPv4.IPNet, + ) + } + + if err != nil { + return errors.Wrap(err, "cannot overlap with machine network") + } + } + } + + return nil +} + func validateOSImageURI(uri string) error { // Check for valid URI and sha256 checksum part of the URL parsedURL, err := url.ParseRequestURI(uri) @@ -153,8 +194,28 @@ func validateOSImages(p *baremetal.Platform, fldPath *field.Path) field.ErrorLis return platformErrs } +func validateHostsCount(hosts []*baremetal.Host, installConfig *types.InstallConfig) error { + + hostsNum := int64(len(hosts)) + counter := int64(0) + + for _, worker := range installConfig.Compute { + if worker.Replicas != nil { + counter += *worker.Replicas + } + } + if installConfig.ControlPlane != nil && installConfig.ControlPlane.Replicas != nil { + counter += *installConfig.ControlPlane.Replicas + } + if hostsNum < counter { + return fmt.Errorf("not enough hosts found (%v) to support all the configured ControlPlane and Compute replicas (%v)", hostsNum, counter) + } + + return nil +} + // ValidatePlatform checks that the specified platform is valid. 
-func ValidatePlatform(p *baremetal.Platform, n *types.Networking, fldPath *field.Path) field.ErrorList { +func ValidatePlatform(p *baremetal.Platform, n *types.Networking, fldPath *field.Path, c *types.InstallConfig) field.ErrorList { allErrs := field.ErrorList{} if err := validate.URI(p.LibvirtURI); err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("libvirtURI"), p.LibvirtURI, err.Error())) @@ -164,16 +225,25 @@ func ValidatePlatform(p *baremetal.Platform, n *types.Networking, fldPath *field allErrs = append(allErrs, field.Invalid(fldPath.Child("provisioningHostIP"), p.ClusterProvisioningIP, err.Error())) } - if p.ProvisioningNetworkCIDR != nil && !p.ProvisioningNetworkCIDR.Contains(net.ParseIP(p.ClusterProvisioningIP)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterProvisioningIP"), p.ClusterProvisioningIP, fmt.Sprintf("%q is not in the provisioning network", p.ClusterProvisioningIP))) - } - if err := validate.IP(p.BootstrapProvisioningIP); err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("bootstrapProvisioningIP"), p.BootstrapProvisioningIP, err.Error())) } - if p.ProvisioningNetworkCIDR != nil && !p.ProvisioningNetworkCIDR.Contains(net.ParseIP(p.BootstrapProvisioningIP)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("bootstrapProvisioningIP"), p.BootstrapProvisioningIP, fmt.Sprintf("%q is not in the provisioning network", p.BootstrapProvisioningIP))) + if p.ProvisioningNetworkCIDR != nil { + // Ensure provisioningNetworkCIDR doesn't overlap with any machine network + if err := validateNoOverlapMachineCIDR(&p.ProvisioningNetworkCIDR.IPNet, n); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("provisioningNetworkCIDR"), p.ProvisioningNetworkCIDR.String(), err.Error())) + } + + // Ensure bootstrapProvisioningIP is in the provisioningNetworkCIDR + if !p.ProvisioningNetworkCIDR.Contains(net.ParseIP(p.BootstrapProvisioningIP)) { + allErrs = append(allErrs, 
field.Invalid(fldPath.Child("bootstrapProvisioningIP"), p.BootstrapProvisioningIP, fmt.Sprintf("%q is not in the provisioning network", p.BootstrapProvisioningIP))) + } + + // Ensure clusterProvisioningIP is in the provisioningNetworkCIDR + if !p.ProvisioningNetworkCIDR.Contains(net.ParseIP(p.ClusterProvisioningIP)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterProvisioningIP"), p.ClusterProvisioningIP, fmt.Sprintf("%q is not in the provisioning network", p.ClusterProvisioningIP))) + } } if p.ProvisioningDHCPRange != "" { @@ -238,6 +308,10 @@ func ValidatePlatform(p *baremetal.Platform, n *types.Networking, fldPath *field allErrs = append(allErrs, field.Invalid(fldPath.Child("bootstrapHostIP"), p.BootstrapProvisioningIP, err.Error())) } + if err := validateHostsCount(p.Hosts, c); err != nil { + allErrs = append(allErrs, field.Required(fldPath.Child("Hosts"), err.Error())) + } + allErrs = append(allErrs, validateOSImages(p, fldPath)...) allErrs = append(allErrs, validateHosts(p.Hosts, fldPath)...) 
diff --git a/pkg/types/baremetal/validation/platform_test.go b/pkg/types/baremetal/validation/platform_test.go index ffd5d97736c..c6fc3ce7392 100644 --- a/pkg/types/baremetal/validation/platform_test.go +++ b/pkg/types/baremetal/validation/platform_test.go @@ -51,6 +51,7 @@ func TestValidatePlatform(t *testing.T) { cases := []struct { name string + config *types.InstallConfig platform *baremetal.Platform expected string }{ @@ -142,6 +143,13 @@ func TestValidatePlatform(t *testing.T) { ProvisioningNetworkInterface("").build(), expected: "Invalid value: \"\": no provisioning network interface is configured, please set this value to be the interface on the provisioning network on your cluster's baremetal hosts", }, + + { + name: "invalid_provisioning_network_overlapping_CIDR", + platform: platform().ProvisioningNetworkCIDR("192.168.111.192/23").build(), + expected: "Invalid value: \"192.168.111.192/23\": cannot overlap with machine network: 192.168.111.0/24 overlaps with 192.168.111.192/23", + }, + { name: "invalid_clusterprovip_machineCIDR", platform: platform(). @@ -218,61 +226,91 @@ func TestValidatePlatform(t *testing.T) { name: "duplicate_bmc_address", platform: platform(). Hosts( - host1().BMCAddress("ipmi://192.168.111.1").build(), - host2().BMCAddress("ipmi://192.168.111.1").build()).build(), + host1().BMCAddress("ipmi://192.168.111.1"), + host2().BMCAddress("ipmi://192.168.111.1")).build(), expected: "baremetal.hosts\\[1\\].BMC.Address: Duplicate value: \"ipmi://192.168.111.1\"", }, { name: "bmc_address_required", platform: platform(). - Hosts(host1().BMCAddress("").build()).build(), + Hosts(host1().BMCAddress("")).build(), expected: "baremetal.hosts\\[0\\].BMC.Address: Required value: missing Address", }, { name: "bmc_username_required", platform: platform(). 
- Hosts(host1().BMCUsername("").build()).build(), + Hosts(host1().BMCUsername("")).build(), expected: "baremetal.hosts\\[0\\].BMC.Username: Required value: missing Username", }, { name: "bmc_password_required", platform: platform(). - Hosts(host1().BMCPassword("").build()).build(), + Hosts(host1().BMCPassword("")).build(), expected: "baremetal.hosts\\[0\\].BMC.Password: Required value: missing Password", }, { name: "duplicate_host_name", platform: platform(). Hosts( - host1().Name("host1").build(), - host2().Name("host1").build()).build(), + host1().Name("host1"), + host2().Name("host1")).build(), expected: "baremetal.hosts\\[1\\].Name: Duplicate value: \"host1\"", }, { name: "duplicate_host_mac", platform: platform(). Hosts( - host1().BootMACAddress("CA:FE:CA:FE:CA:FE").build(), - host2().BootMACAddress("CA:FE:CA:FE:CA:FE").build()).build(), + host1().BootMACAddress("CA:FE:CA:FE:CA:FE"), + host2().BootMACAddress("CA:FE:CA:FE:CA:FE")).build(), expected: "baremetal.hosts\\[1\\].BootMACAddress: Duplicate value: \"CA:FE:CA:FE:CA:FE\"", }, { name: "missing_name", platform: platform(). - Hosts(host1().Name("").build()).build(), + Hosts(host1().Name("")).build(), expected: "baremetal.hosts\\[0\\].Name: Required value: missing Name", }, { name: "missing_mac", platform: platform(). - Hosts(host1().BootMACAddress("").build()).build(), + Hosts(host1().BootMACAddress("")).build(), expected: "baremetal.hosts\\[0\\].BootMACAddress: Required value: missing BootMACAddress", }, + { + name: "toofew_hosts", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1())). + ControlPlane( + machinePool().Replicas(3)). + Compute( + machinePool().Replicas(2), + machinePool().Replicas(3)).build(), + expected: "baremetal.Hosts: Required value: not enough hosts found \\(1\\) to support all the configured ControlPlane and Compute replicas \\(8\\)", + }, + { + name: "enough_hosts", + config: installConfig(). + BareMetalPlatform( + platform().Hosts( + host1(), + host2())). 
+ ControlPlane( + machinePool().Replicas(2)).build(), + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - err := ValidatePlatform(tc.platform, network(), field.NewPath("baremetal")).ToAggregate() + //Build default wrapping installConfig + if tc.config == nil { + tc.config = installConfig().build() + tc.config.BareMetal = tc.platform + } + + err := ValidatePlatform(tc.config.BareMetal, network(), field.NewPath("baremetal"), tc.config).ToAggregate() + if tc.expected == "" { assert.NoError(t, err) } else { @@ -413,8 +451,11 @@ func (pb *platformBuilder) IngressVIP(value string) *platformBuilder { return pb } -func (pb *platformBuilder) Hosts(value ...*baremetal.Host) *platformBuilder { - pb.Platform.Hosts = value +func (pb *platformBuilder) Hosts(builders ...*hostBuilder) *platformBuilder { + pb.Platform.Hosts = nil + for _, builder := range builders { + pb.Platform.Hosts = append(pb.Platform.Hosts, builder.build()) + } return pb } @@ -441,3 +482,57 @@ func (pb *platformBuilder) ProvisioningNetworkInterface(value string) *platformB func network() *types.Networking { return &types.Networking{MachineNetwork: []types.MachineNetworkEntry{{CIDR: *ipnet.MustParseCIDR("192.168.111.0/24")}}} } + +type installConfigBuilder struct { + types.InstallConfig +} + +func installConfig() *installConfigBuilder { + return &installConfigBuilder{ + InstallConfig: types.InstallConfig{}, + } +} + +func (icb *installConfigBuilder) build() *types.InstallConfig { + return &icb.InstallConfig +} + +func (icb *installConfigBuilder) BareMetalPlatform(builder *platformBuilder) *installConfigBuilder { + icb.InstallConfig.Platform = types.Platform{ + BareMetal: builder.build(), + } + return icb +} + +func (icb *installConfigBuilder) ControlPlane(builder *machinePoolBuilder) *installConfigBuilder { + icb.InstallConfig.ControlPlane = builder.build() + + return icb +} + +func (icb *installConfigBuilder) Compute(builders ...*machinePoolBuilder) *installConfigBuilder { + 
icb.InstallConfig.Compute = nil + for _, builder := range builders { + icb.InstallConfig.Compute = append(icb.InstallConfig.Compute, *builder.build()) + } + return icb +} + +type machinePoolBuilder struct { + types.MachinePool +} + +func machinePool() *machinePoolBuilder { + return &machinePoolBuilder{ + MachinePool: types.MachinePool{}, + } +} + +func (mpb *machinePoolBuilder) build() *types.MachinePool { + return &mpb.MachinePool +} + +func (mpb *machinePoolBuilder) Replicas(count int64) *machinePoolBuilder { + mpb.MachinePool.Replicas = &count + return mpb +} diff --git a/pkg/types/config/openstack/OWNERS b/pkg/types/config/openstack/OWNERS deleted file mode 100644 index ea6fcb46def..00000000000 --- a/pkg/types/config/openstack/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md -# This file just uses aliases defined in OWNERS_ALIASES. - -approvers: - - openstack-approvers diff --git a/pkg/types/defaults/installconfig.go b/pkg/types/defaults/installconfig.go index ba9ed26c96a..629bf0ae488 100644 --- a/pkg/types/defaults/installconfig.go +++ b/pkg/types/defaults/installconfig.go @@ -76,7 +76,7 @@ func SetInstallConfigDefaults(c *types.InstallConfig) { case c.Platform.Libvirt != nil: libvirtdefaults.SetPlatformDefaults(c.Platform.Libvirt) case c.Platform.OpenStack != nil: - openstackdefaults.SetPlatformDefaults(c.Platform.OpenStack) + openstackdefaults.SetPlatformDefaults(c.Platform.OpenStack, c.Networking) case c.Platform.VSphere != nil: vspheredefaults.SetPlatformDefaults(c.Platform.VSphere, c) case c.Platform.BareMetal != nil: diff --git a/pkg/types/defaults/installconfig_test.go b/pkg/types/defaults/installconfig_test.go index 90345fc5387..7afc96f5ec5 100644 --- a/pkg/types/defaults/installconfig_test.go +++ b/pkg/types/defaults/installconfig_test.go @@ -68,7 +68,7 @@ func defaultLibvirtInstallConfig() *types.InstallConfig { func defaultOpenStackInstallConfig() *types.InstallConfig { c := 
defaultInstallConfig() c.Platform.OpenStack = &openstack.Platform{} - openstackdefaults.SetPlatformDefaults(c.Platform.OpenStack) + openstackdefaults.SetPlatformDefaults(c.Platform.OpenStack, c.Networking) return c } diff --git a/pkg/types/installconfig.go b/pkg/types/installconfig.go index fa92d3a6d50..6c509d28eba 100644 --- a/pkg/types/installconfig.go +++ b/pkg/types/installconfig.go @@ -33,6 +33,7 @@ var ( gcp.Name, openstack.Name, ovirt.Name, + vsphere.Name, } // HiddenPlatformNames is a slice with all the // hidden-but-supported platform names. This list isn't presented @@ -40,7 +41,6 @@ var ( HiddenPlatformNames = []string{ baremetal.Name, none.Name, - vsphere.Name, } ) diff --git a/pkg/types/openstack/defaults/platform.go b/pkg/types/openstack/defaults/platform.go index 9852933060e..4ea513e7f1d 100644 --- a/pkg/types/openstack/defaults/platform.go +++ b/pkg/types/openstack/defaults/platform.go @@ -16,21 +16,31 @@ const ( ) // SetPlatformDefaults sets the defaults for the platform. -func SetPlatformDefaults(p *openstack.Platform) { +func SetPlatformDefaults(p *openstack.Platform, n *types.Networking) { if p.Cloud == "" { p.Cloud = os.Getenv("OS_CLOUD") if p.Cloud == "" { p.Cloud = DefaultCloudName } } -} + // APIVIP returns the internal virtual IP address (VIP) put in front + // of the Kubernetes API server for use by components inside the + // cluster. The DNS static pods running on the nodes resolve the + // api-int record to APIVIP. + if p.APIVIP == "" { + vip, _ := cidr.Host(&n.MachineNetwork[0].CIDR.IPNet, 5) + p.APIVIP = vip.String() + } -// APIVIP returns the internal virtual IP address (VIP) put in front -// of the Kubernetes API server for use by components inside the -// cluster. The DNS static pods running on the nodes resolve the -// api-int record to APIVIP. 
-func APIVIP(networking *types.Networking) (net.IP, error) { - return cidr.Host(&networking.MachineNetwork[0].CIDR.IPNet, 5) + // IngressVIP returns the internal virtual IP address (VIP) put in + // front of the OpenShift router pods. This provides the internal + // accessibility to the internal pods running on the worker nodes, + // e.g. `console`. The DNS static pods running on the nodes resolve + // the wildcard apps record to IngressVIP. + if p.IngressVIP == "" { + vip, _ := cidr.Host(&n.MachineNetwork[0].CIDR.IPNet, 7) + p.IngressVIP = vip.String() + } } // DNSVIP returns the internal virtual IP address (VIP) put in front @@ -40,12 +50,3 @@ func APIVIP(networking *types.Networking) (net.IP, error) { func DNSVIP(networking *types.Networking) (net.IP, error) { return cidr.Host(&networking.MachineNetwork[0].CIDR.IPNet, 6) } - -// IngressVIP returns the internal virtual IP address (VIP) put in -// front of the OpenShift router pods. This provides the internal -// accessibility to the internal pods running on the worker nodes, -// e.g. `console`. The DNS static pods running on the nodes resolve -// the wildcard apps record to IngressVIP. -func IngressVIP(networking *types.Networking) (net.IP, error) { - return cidr.Host(&networking.MachineNetwork[0].CIDR.IPNet, 7) -} diff --git a/pkg/types/openstack/machinepool.go b/pkg/types/openstack/machinepool.go index b05437932a4..89d93d35862 100644 --- a/pkg/types/openstack/machinepool.go +++ b/pkg/types/openstack/machinepool.go @@ -11,6 +11,17 @@ type MachinePool struct { // The instances use ephemeral disks if not set. // +optional RootVolume *RootVolume `json:"rootVolume,omitempty"` + + // AdditionalNetworkIDs contains IDs of additional networks for machines, + // where each ID is presented in UUID v4 format. + // Allowed address pairs won't be created for the additional networks. 
+ // +optional + AdditionalNetworkIDs []string `json:"additionalNetworkIDs,omitempty"` + + // AdditionalSecurityGroupIDs contains IDs of additional security groups for machines, + // where each ID is presented in UUID v4 format. + // +optional + AdditionalSecurityGroupIDs []string `json:"additionalSecurityGroupIDs,omitempty"` } // Set sets the values from `required` to `a`. @@ -30,6 +41,14 @@ func (o *MachinePool) Set(required *MachinePool) { o.RootVolume.Size = required.RootVolume.Size o.RootVolume.Type = required.RootVolume.Type } + + if required.AdditionalNetworkIDs != nil { + o.AdditionalNetworkIDs = append(required.AdditionalNetworkIDs[:0:0], required.AdditionalNetworkIDs...) + } + + if required.AdditionalSecurityGroupIDs != nil { + o.AdditionalSecurityGroupIDs = append(required.AdditionalSecurityGroupIDs[:0:0], required.AdditionalSecurityGroupIDs...) + } } // RootVolume defines the storage for an instance. diff --git a/pkg/types/openstack/platform.go b/pkg/types/openstack/platform.go index 183cb92f7bf..f4145dc2a1f 100644 --- a/pkg/types/openstack/platform.go +++ b/pkg/types/openstack/platform.go @@ -43,4 +43,20 @@ type Platform struct { // the default OS image for cluster nodes, or an existing Glance image name. // +optional ClusterOSImage string `json:"clusterOSImage,omitempty"` + + // APIVIP is the static IP on the nodes subnet that the api port for openshift will be assigned + // Default: will be set to the 5 on the first entry in the machineNetwork CIDR + // +optional + APIVIP string `json:"apiVIP,omitempty"` + + // IngressVIP is the static IP on the nodes subnet that the apps port for openshift will be assigned + // Default: will be set to the 7 on the first entry in the machineNetwork CIDR + // +optional + IngressVIP string `json:"ingressVIP,omitempty"` + + // MachinesSubnet is the UUIDv4 of an openstack subnet. This subnet will be used by all nodes created by the installer.
+ // By setting this, the installer will no longer create a network and subnet. + // The subnet and network specified in MachinesSubnet will not be deleted or modified by the installer. + // +optional + MachinesSubnet string `json:"machinesSubnet,omitempty"` } diff --git a/pkg/types/openstack/validation/machinepool.go b/pkg/types/openstack/validation/machinepool.go index 27184fe097a..6fd402e7a29 100644 --- a/pkg/types/openstack/validation/machinepool.go +++ b/pkg/types/openstack/validation/machinepool.go @@ -3,6 +3,7 @@ package validation import ( "k8s.io/apimachinery/pkg/util/validation/field" + guuid "github.com/google/uuid" "github.com/openshift/installer/pkg/types/openstack" ) @@ -20,5 +21,35 @@ func ValidateMachinePool(p *openstack.MachinePool, fldPath *field.Path) field.Er } } + allErrs = append(allErrs, validateUUIDV4s(p.AdditionalNetworkIDs, fldPath.Child("additionalNetworkIDs"))...) + allErrs = append(allErrs, validateUUIDV4s(p.AdditionalSecurityGroupIDs, fldPath.Child("additionalSecurityGroupIDs"))...) 
+ + return allErrs +} + +func validateUUIDV4s(input []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for idx, uuid := range input { + if !validUUIDv4(uuid) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(idx), uuid, "valid UUID v4 must be specified")) + } + } + return allErrs } + +// validUUIDv4 checks if string is in UUID v4 format +// For more information: https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random) +func validUUIDv4(s string) bool { + uuid, err := guuid.Parse(s) + if err != nil { + return false + } + + // check that version of the uuid + if uuid.Version().String() != "VERSION_4" { + return false + } + + return true +} diff --git a/pkg/types/openstack/validation/machinepool_test.go b/pkg/types/openstack/validation/machinepool_test.go index 84f0560194d..84b1950dada 100644 --- a/pkg/types/openstack/validation/machinepool_test.go +++ b/pkg/types/openstack/validation/machinepool_test.go @@ -56,6 +56,70 @@ func TestValidateMachinePool(t *testing.T) { }, }, }, + { + name: "valid additional network ids", + pool: &openstack.MachinePool{ + AdditionalNetworkIDs: []string{ + "51e5fe10-5325-4a32-bce8-7ebe9708c453", + "3ade1375-acfd-4eda-90be-3530af4f25ec", + "460e993b-e932-43c6-a7a2-e51ca58f4eae", + }, + }, + }, + { + name: "invalid additional network ids", + pool: &openstack.MachinePool{ + AdditionalNetworkIDs: []string{ + "51e5fe10-5325-4a32-bce8-7ebe9708c453", + "INVALID", + "", + }, + }, + expected: `^\[test-path.additionalNetworkIDs\[1\]: Invalid value: \"INVALID\": valid UUID v4 must be specified, test-path.additionalNetworkIDs\[2\]: Invalid value: \"\": valid UUID v4 must be specified\]$`, + }, + { + name: "wrong additional network ids version", + pool: &openstack.MachinePool{ + AdditionalNetworkIDs: []string{ + "25b91ff0-75c3-11ea-9aff-4fc68ed06d45", // VERSION_1 + "bd15ec47-a3ec-329b-812b-9c617ca86881", // VERSION_3 + "39499c61-11eb-5f02-a519-f2e38575cedd", // VERSION_5 + }, + }, + 
expected: `^\[test-path.additionalNetworkIDs\[0\]: Invalid value: \"25b91ff0-75c3-11ea-9aff-4fc68ed06d45\": valid UUID v4 must be specified, test-path.additionalNetworkIDs\[1\]: Invalid value: \"bd15ec47-a3ec-329b-812b-9c617ca86881\": valid UUID v4 must be specified, test-path.additionalNetworkIDs\[2\]: Invalid value: \"39499c61-11eb-5f02-a519-f2e38575cedd\": valid UUID v4 must be specified\]$`, + }, + { + name: "valid additional security group ids", + pool: &openstack.MachinePool{ + AdditionalSecurityGroupIDs: []string{ + "51e5fe10-5325-4a32-bce8-7ebe9708c453", + "3ade1375-acfd-4eda-90be-3530af4f25ec", + "460e993b-e932-43c6-a7a2-e51ca58f4eae", + }, + }, + }, + { + name: "invalid additional security group ids", + pool: &openstack.MachinePool{ + AdditionalSecurityGroupIDs: []string{ + "51e5fe10-5325-4a32-bce8-7ebe9708c453", + "INVALID", + "", + }, + }, + expected: `^\[test-path.additionalSecurityGroupIDs\[1\]: Invalid value: \"INVALID\": valid UUID v4 must be specified, test-path.additionalSecurityGroupIDs\[2\]: Invalid value: \"\": valid UUID v4 must be specified\]$`, + }, + { + name: "wrong additional security group ids version", + pool: &openstack.MachinePool{ + AdditionalSecurityGroupIDs: []string{ + "25b91ff0-75c3-11ea-9aff-4fc68ed06d45", // VERSION_1 + "bd15ec47-a3ec-329b-812b-9c617ca86881", // VERSION_3 + "39499c61-11eb-5f02-a519-f2e38575cedd", // VERSION_5 + }, + }, + expected: `^\[test-path.additionalSecurityGroupIDs\[0\]: Invalid value: \"25b91ff0-75c3-11ea-9aff-4fc68ed06d45\": valid UUID v4 must be specified, test-path.additionalSecurityGroupIDs\[1\]: Invalid value: \"bd15ec47-a3ec-329b-812b-9c617ca86881\": valid UUID v4 must be specified, test-path.additionalSecurityGroupIDs\[2\]: Invalid value: \"39499c61-11eb-5f02-a519-f2e38575cedd\": valid UUID v4 must be specified\]$`, + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/types/openstack/validation/mock/validvaluesfetcher_generated.go 
b/pkg/types/openstack/validation/mock/validvaluesfetcher_generated.go index 72ca47bafff..8f2febe20d9 100644 --- a/pkg/types/openstack/validation/mock/validvaluesfetcher_generated.go +++ b/pkg/types/openstack/validation/mock/validvaluesfetcher_generated.go @@ -9,30 +9,30 @@ import ( reflect "reflect" ) -// MockValidValuesFetcher is a mock of ValidValuesFetcher interface +// MockValidValuesFetcher is a mock of ValidValuesFetcher interface. type MockValidValuesFetcher struct { ctrl *gomock.Controller recorder *MockValidValuesFetcherMockRecorder } -// MockValidValuesFetcherMockRecorder is the mock recorder for MockValidValuesFetcher +// MockValidValuesFetcherMockRecorder is the mock recorder for MockValidValuesFetcher. type MockValidValuesFetcherMockRecorder struct { mock *MockValidValuesFetcher } -// NewMockValidValuesFetcher creates a new mock instance +// NewMockValidValuesFetcher creates a new mock instance. func NewMockValidValuesFetcher(ctrl *gomock.Controller) *MockValidValuesFetcher { mock := &MockValidValuesFetcher{ctrl: ctrl} mock.recorder = &MockValidValuesFetcherMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockValidValuesFetcher) EXPECT() *MockValidValuesFetcherMockRecorder { return m.recorder } -// GetCloudNames mocks base method +// GetCloudNames mocks base method. func (m *MockValidValuesFetcher) GetCloudNames() ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCloudNames") @@ -41,13 +41,13 @@ func (m *MockValidValuesFetcher) GetCloudNames() ([]string, error) { return ret0, ret1 } -// GetCloudNames indicates an expected call of GetCloudNames +// GetCloudNames indicates an expected call of GetCloudNames. 
func (mr *MockValidValuesFetcherMockRecorder) GetCloudNames() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCloudNames", reflect.TypeOf((*MockValidValuesFetcher)(nil).GetCloudNames)) } -// GetNetworkNames mocks base method +// GetNetworkNames mocks base method. func (m *MockValidValuesFetcher) GetNetworkNames(cloud string) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetNetworkNames", cloud) @@ -56,13 +56,13 @@ func (m *MockValidValuesFetcher) GetNetworkNames(cloud string) ([]string, error) return ret0, ret1 } -// GetNetworkNames indicates an expected call of GetNetworkNames +// GetNetworkNames indicates an expected call of GetNetworkNames. func (mr *MockValidValuesFetcherMockRecorder) GetNetworkNames(cloud interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkNames", reflect.TypeOf((*MockValidValuesFetcher)(nil).GetNetworkNames), cloud) } -// GetFlavorNames mocks base method +// GetFlavorNames mocks base method. func (m *MockValidValuesFetcher) GetFlavorNames(cloud string) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetFlavorNames", cloud) @@ -71,13 +71,13 @@ func (m *MockValidValuesFetcher) GetFlavorNames(cloud string) ([]string, error) return ret0, ret1 } -// GetFlavorNames indicates an expected call of GetFlavorNames +// GetFlavorNames indicates an expected call of GetFlavorNames. func (mr *MockValidValuesFetcherMockRecorder) GetFlavorNames(cloud interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFlavorNames", reflect.TypeOf((*MockValidValuesFetcher)(nil).GetFlavorNames), cloud) } -// GetNetworkExtensionsAliases mocks base method +// GetNetworkExtensionsAliases mocks base method. 
func (m *MockValidValuesFetcher) GetNetworkExtensionsAliases(cloud string) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetNetworkExtensionsAliases", cloud) @@ -86,13 +86,13 @@ func (m *MockValidValuesFetcher) GetNetworkExtensionsAliases(cloud string) ([]st return ret0, ret1 } -// GetNetworkExtensionsAliases indicates an expected call of GetNetworkExtensionsAliases +// GetNetworkExtensionsAliases indicates an expected call of GetNetworkExtensionsAliases. func (mr *MockValidValuesFetcherMockRecorder) GetNetworkExtensionsAliases(cloud interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkExtensionsAliases", reflect.TypeOf((*MockValidValuesFetcher)(nil).GetNetworkExtensionsAliases), cloud) } -// GetServiceCatalog mocks base method +// GetServiceCatalog mocks base method. func (m *MockValidValuesFetcher) GetServiceCatalog(cloud string) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetServiceCatalog", cloud) @@ -101,13 +101,13 @@ func (m *MockValidValuesFetcher) GetServiceCatalog(cloud string) ([]string, erro return ret0, ret1 } -// GetServiceCatalog indicates an expected call of GetServiceCatalog +// GetServiceCatalog indicates an expected call of GetServiceCatalog. func (mr *MockValidValuesFetcherMockRecorder) GetServiceCatalog(cloud interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceCatalog", reflect.TypeOf((*MockValidValuesFetcher)(nil).GetServiceCatalog), cloud) } -// GetFloatingIPNames mocks base method +// GetFloatingIPNames mocks base method. 
func (m *MockValidValuesFetcher) GetFloatingIPNames(cloud, floatingNetwork string) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetFloatingIPNames", cloud, floatingNetwork) @@ -116,8 +116,23 @@ func (m *MockValidValuesFetcher) GetFloatingIPNames(cloud, floatingNetwork strin return ret0, ret1 } -// GetFloatingIPNames indicates an expected call of GetFloatingIPNames +// GetFloatingIPNames indicates an expected call of GetFloatingIPNames. func (mr *MockValidValuesFetcherMockRecorder) GetFloatingIPNames(cloud, floatingNetwork interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFloatingIPNames", reflect.TypeOf((*MockValidValuesFetcher)(nil).GetFloatingIPNames), cloud, floatingNetwork) } + +// GetSubnetCIDR mocks base method. +func (m *MockValidValuesFetcher) GetSubnetCIDR(cloud, subnetID string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetCIDR", cloud, subnetID) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetCIDR indicates an expected call of GetSubnetCIDR. 
+func (mr *MockValidValuesFetcherMockRecorder) GetSubnetCIDR(cloud, subnetID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetCIDR", reflect.TypeOf((*MockValidValuesFetcher)(nil).GetSubnetCIDR), cloud, subnetID) +} diff --git a/pkg/types/openstack/validation/platform.go b/pkg/types/openstack/validation/platform.go index 7b6ad4edd6d..e2f3e2dc6e4 100644 --- a/pkg/types/openstack/validation/platform.go +++ b/pkg/types/openstack/validation/platform.go @@ -2,6 +2,8 @@ package validation import ( "errors" + "fmt" + "net" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/validation/field" @@ -20,6 +22,22 @@ func ValidatePlatform(p *openstack.Platform, n *types.Networking, fldPath *field } else if !isValidValue(p.Cloud, validClouds) { allErrs = append(allErrs, field.NotSupported(fldPath.Child("cloud"), p.Cloud, validClouds)) } else { + if p.MachinesSubnet != "" { + if len(p.ExternalDNS) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("externalDNS"), p.ExternalDNS, "externalDNS is set, externalDNS is not supported when machinesSubnet is set")) + } + if !validUUIDv4(p.MachinesSubnet) { + allErrs = append(allErrs, field.InternalError(fldPath.Child("machinesSubnet"), errors.New("invalid subnet ID"))) + } else { + cidr, err := fetcher.GetSubnetCIDR(p.Cloud, p.MachinesSubnet) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath.Child("machinesSubnet"), fmt.Errorf("invalid subnet %v", err))) + } + if n.MachineNetwork[0].CIDR.String() != cidr { + allErrs = append(allErrs, field.InternalError(fldPath.Child("machinesSubnet"), fmt.Errorf("the first CIDR in machineNetwork, %s, doesn't match the CIDR of the machineSubnet, %s", n.MachineNetwork[0].CIDR.String(), cidr))) + } + } + } validNetworks, err := fetcher.GetNetworkNames(p.Cloud) if err != nil { allErrs = append(allErrs, field.InternalError(fldPath.Child("externalNetwork"), errors.New("could not retrieve valid 
networks"))) @@ -59,12 +77,26 @@ func ValidatePlatform(p *openstack.Platform, n *types.Networking, fldPath *field allErrs = append(allErrs, field.Invalid(field.NewPath("metadata", "name"), c.ObjectMeta.Name, "metadata name is too long, please restrict it to 14 characters")) } + if len(p.ExternalDNS) > 0 && p.MachinesSubnet != "" { + allErrs = append(allErrs, field.InternalError(fldPath.Child("machinesSubnet"), fmt.Errorf("externalDNS can't be set when using a custom machinesSubnet"))) + } + for _, ip := range p.ExternalDNS { if err := validate.IP(ip); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("ExternalDNS"), p.ExternalDNS, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath.Child("externalDNS"), p.ExternalDNS, err.Error())) } } + err = validateVIP(p.APIVIP, n) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVIP"), p.APIVIP, err.Error())) + } + + err = validateVIP(p.IngressVIP, n) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ingressVIP"), p.IngressVIP, err.Error())) + } + return allErrs } @@ -76,3 +108,16 @@ func isValidValue(s string, validValues []string) bool { } return false } + +func validateVIP(vip string, n *types.Networking) error { + if vip != "" { + if err := validate.IP(vip); err != nil { + return err + } + + if !n.MachineNetwork[0].CIDR.Contains(net.ParseIP(vip)) { + return errors.New("IP is not in the machineNetwork") + } + } + return nil +} diff --git a/pkg/types/openstack/validation/platform_test.go b/pkg/types/openstack/validation/platform_test.go index 55971cbef4f..519178d7e6f 100644 --- a/pkg/types/openstack/validation/platform_test.go +++ b/pkg/types/openstack/validation/platform_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/util/validation/field" + "github.com/openshift/installer/pkg/ipnet" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/openstack" 
"github.com/openshift/installer/pkg/types/openstack/validation/mock" @@ -21,21 +22,34 @@ func validPlatform() *openstack.Platform { } } +func validNetworking() *types.Networking { + return &types.Networking{ + NetworkType: "OpenShiftSDN", + MachineNetwork: []types.MachineNetworkEntry{{ + CIDR: *ipnet.MustParseCIDR("10.0.0.0/16"), + }}, + } +} + func TestValidatePlatform(t *testing.T) { cases := []struct { - name string - platform *openstack.Platform - noClouds bool - noNetworks bool - noFlavors bool - noNetExts bool - noServiceCatalog bool - valid bool + name string + platform *openstack.Platform + networking *types.Networking + noClouds bool + noNetworks bool + noFlavors bool + noNetExts bool + noServiceCatalog bool + validMachinesSubnet bool + invalidMachinesSubnet bool + valid bool }{ { - name: "minimal", - platform: validPlatform(), - valid: true, + name: "minimal", + platform: validPlatform(), + networking: validNetworking(), + valid: true, }, { name: "missing cloud", @@ -44,7 +58,8 @@ func TestValidatePlatform(t *testing.T) { p.Cloud = "" return p }(), - valid: false, + networking: validNetworking(), + valid: false, }, { name: "missing external network", @@ -53,7 +68,8 @@ func TestValidatePlatform(t *testing.T) { p.ExternalNetwork = "" return p }(), - valid: false, + networking: validNetworking(), + valid: false, }, { name: "valid default machine pool", @@ -62,7 +78,8 @@ func TestValidatePlatform(t *testing.T) { p.DefaultMachinePlatform = &openstack.MachinePool{} return p }(), - valid: true, + networking: validNetworking(), + valid: true, }, { name: "non IP external dns", @@ -73,7 +90,8 @@ func TestValidatePlatform(t *testing.T) { } return p }(), - valid: false, + networking: validNetworking(), + valid: false, }, { name: "valid external dns", @@ -84,38 +102,156 @@ func TestValidatePlatform(t *testing.T) { } return p }(), - valid: true, + networking: validNetworking(), + valid: true, }, { - name: "clouds fetch failure", - platform: validPlatform(), - noClouds: 
true, - valid: false, + name: "clouds fetch failure", + platform: validPlatform(), + networking: validNetworking(), + noClouds: true, + valid: false, }, { name: "networks fetch failure", platform: validPlatform(), + networking: validNetworking(), noNetworks: true, valid: false, }, { - name: "flavors fetch failure", - platform: validPlatform(), - noFlavors: true, - valid: false, + name: "flavors fetch failure", + platform: validPlatform(), + networking: validNetworking(), + noFlavors: true, + valid: false, }, { - name: "network extensions fetch failure", - platform: validPlatform(), - noNetExts: true, - valid: true, + name: "network extensions fetch failure", + platform: validPlatform(), + networking: validNetworking(), + noNetExts: true, + valid: true, }, { name: "service catalog fetch failure", platform: validPlatform(), + networking: validNetworking(), noServiceCatalog: true, valid: true, }, + { + name: "valid custom API vip", + platform: func() *openstack.Platform { + p := validPlatform() + p.APIVIP = "10.0.0.9" + return p + }(), + networking: validNetworking(), + valid: true, + }, + { + name: "incorrect network custom API vip", + platform: func() *openstack.Platform { + p := validPlatform() + p.APIVIP = "11.1.0.5" + return p + }(), + networking: validNetworking(), + valid: false, + }, + { + name: "valid custom ingress vip", + platform: func() *openstack.Platform { + p := validPlatform() + p.IngressVIP = "10.0.0.9" + return p + }(), + networking: validNetworking(), + valid: true, + }, + { + name: "incorrect network custom ingress vip", + platform: func() *openstack.Platform { + p := validPlatform() + p.IngressVIP = "11.1.0.5" + return p + }(), + networking: validNetworking(), + valid: false, + }, + { + name: "invalid network custom ingress vip", + platform: func() *openstack.Platform { + p := validPlatform() + p.IngressVIP = "banana" + return p + }(), + networking: validNetworking(), + valid: false, + }, + { + name: "invalid network custom API vip", + platform: 
func() *openstack.Platform { + p := validPlatform() + p.APIVIP = "banana" + return p + }(), + networking: validNetworking(), + valid: false, + }, + { + name: "valid MachinesSubnet", + platform: func() *openstack.Platform { + p := validPlatform() + p.MachinesSubnet = "c664df47-4f7e-4852-819e-e66f9882b7b3" + return p + }(), + networking: validNetworking(), + validMachinesSubnet: true, + valid: true, + }, + { + name: "valid MachinesSubnet invalid machineNetwork", + platform: func() *openstack.Platform { + p := validPlatform() + p.MachinesSubnet = "c664df47-4f7e-4852-819e-e66f9882b7b3" + return p + }(), + networking: func() *types.Networking { + n := validNetworking() + n.MachineNetwork[0].CIDR = *ipnet.MustParseCIDR("105.90.0.0/16") + return n + }(), + validMachinesSubnet: true, + valid: false, + }, + { + name: "invalid MachinesSubnet", + platform: func() *openstack.Platform { + p := validPlatform() + p.MachinesSubnet = "subnet-c17b" + return p + }(), + networking: validNetworking(), + invalidMachinesSubnet: true, + valid: false, + }, + { + name: "valid MachinesSubnet externalDNS set", + platform: func() *openstack.Platform { + p := validPlatform() + p.MachinesSubnet = "c664df47-4f7e-4852-819e-e66f9882b7b3" + p.ExternalDNS = []string{ + "192.168.1.12", + "10.0.5.16", + } + return p + }(), + networking: validNetworking(), + validMachinesSubnet: true, + valid: false, + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { @@ -166,11 +302,21 @@ func TestValidatePlatform(t *testing.T) { Return([]string{"octavia"}, nil). MaxTimes(1) } + if tc.validMachinesSubnet { + fetcher.EXPECT().GetSubnetCIDR(tc.platform.Cloud, tc.platform.MachinesSubnet). + Return("10.0.0.0/16", nil). + MaxTimes(1) + } + if tc.invalidMachinesSubnet { + fetcher.EXPECT().GetSubnetCIDR(tc.platform.Cloud, tc.platform.MachinesSubnet). + Return("", errors.New("invalid machinesSubnet")). 
+ MaxTimes(1) + } testConfig := types.InstallConfig{} testConfig.ObjectMeta.Name = "test" - err := ValidatePlatform(tc.platform, nil, field.NewPath("test-path"), fetcher, &testConfig).ToAggregate() + err := ValidatePlatform(tc.platform, tc.networking, field.NewPath("test-path"), fetcher, &testConfig).ToAggregate() if tc.valid { assert.NoError(t, err) } else { diff --git a/pkg/types/openstack/validation/validvaluesfetcher.go b/pkg/types/openstack/validation/validvaluesfetcher.go index 1b7d0a31b57..8fe7ebb406e 100644 --- a/pkg/types/openstack/validation/validvaluesfetcher.go +++ b/pkg/types/openstack/validation/validvaluesfetcher.go @@ -16,4 +16,6 @@ type ValidValuesFetcher interface { GetServiceCatalog(cloud string) ([]string, error) // GetFloatingIPNames gets the floating IPs GetFloatingIPNames(cloud string, floatingNetwork string) ([]string, error) + // GetSubnetCIDR gets the CIDR of a subnet + GetSubnetCIDR(cloud string, subnetID string) (string, error) } diff --git a/pkg/types/validation/installconfig.go b/pkg/types/validation/installconfig.go index 0453693c8e0..e592b4f3134 100644 --- a/pkg/types/validation/installconfig.go +++ b/pkg/types/validation/installconfig.go @@ -34,12 +34,6 @@ const ( masterPoolName = "master" ) -// ClusterDomain returns the cluster domain for a cluster with the specified -// base domain and cluster name. -func ClusterDomain(baseDomain, clusterName string) string { - return fmt.Sprintf("%s.%s", clusterName, baseDomain) -} - // ValidateInstallConfig checks that the specified install config is valid. 
func ValidateInstallConfig(c *types.InstallConfig, openStackValidValuesFetcher openstackvalidation.ValidValuesFetcher) field.ErrorList { allErrs := field.ErrorList{} @@ -74,7 +68,7 @@ func ValidateInstallConfig(c *types.InstallConfig, openStackValidValuesFetcher o allErrs = append(allErrs, field.Invalid(field.NewPath("baseDomain"), c.BaseDomain, baseDomainErr.Error())) } if nameErr == nil && baseDomainErr == nil { - clusterDomain := ClusterDomain(c.BaseDomain, c.ObjectMeta.Name) + clusterDomain := c.ClusterDomain() if err := validate.DomainName(clusterDomain, true); err != nil { allErrs = append(allErrs, field.Invalid(field.NewPath("baseDomain"), clusterDomain, err.Error())) } @@ -378,7 +372,7 @@ func validatePlatform(platform *types.Platform, fldPath *field.Path, openStackVa } if platform.BareMetal != nil { validate(baremetal.Name, platform.BareMetal, func(f *field.Path) field.ErrorList { - return baremetalvalidation.ValidatePlatform(platform.BareMetal, network, f) + return baremetalvalidation.ValidatePlatform(platform.BareMetal, network, f, c) }) } return allErrs diff --git a/pkg/types/validation/installconfig_test.go b/pkg/types/validation/installconfig_test.go index 15345fb2582..18d02fc1703 100644 --- a/pkg/types/validation/installconfig_test.go +++ b/pkg/types/validation/installconfig_test.go @@ -97,13 +97,32 @@ func validBareMetalPlatform() *baremetal.Platform { ProvisioningNetworkCIDR: ipnet.MustParseCIDR("192.168.111.0/24"), BootstrapProvisioningIP: "192.168.111.1", ClusterProvisioningIP: "192.168.111.2", - Hosts: []*baremetal.Host{}, - ExternalBridge: iface[0].Name, - ProvisioningBridge: iface[0].Name, - DefaultMachinePlatform: &baremetal.MachinePool{}, - APIVIP: "10.0.0.5", - IngressVIP: "10.0.0.4", - DNSVIP: "10.0.0.2", + Hosts: []*baremetal.Host{ + { + Name: "host1", + BootMACAddress: "CA:FE:CA:FE:00:00", + BMC: baremetal.BMC{ + Username: "root", + Password: "password", + Address: "ipmi://192.168.111.1", + }, + }, + { + Name: "host2", + BootMACAddress: 
"CA:FE:CA:FE:00:01", + BMC: baremetal.BMC{ + Username: "root", + Password: "password", + Address: "ipmi://192.168.111.2", + }, + }, + }, + ExternalBridge: iface[0].Name, + ProvisioningBridge: iface[0].Name, + DefaultMachinePlatform: &baremetal.MachinePool{}, + APIVIP: "10.0.0.5", + IngressVIP: "10.0.0.4", + DNSVIP: "10.0.0.2", } } @@ -470,7 +489,7 @@ func TestValidateInstallConfig(t *testing.T) { } return c }(), - expectedError: `^compute\[0\]\.platform\.openstack: Invalid value: openstack\.MachinePool{FlavorName:"", RootVolume:\(\*openstack\.RootVolume\)\(nil\)}: cannot specify "openstack" for machine pool when cluster is using "aws"$`, + expectedError: `^compute\[0\]\.platform\.openstack: Invalid value: openstack\.MachinePool{.*}: cannot specify "openstack" for machine pool when cluster is using "aws"$`, }, { name: "missing platform", @@ -499,7 +518,7 @@ func TestValidateInstallConfig(t *testing.T) { } return c }(), - expectedError: `^platform\.aws\.region: Unsupported value: "": supported values: "ap-northeast-1", "ap-northeast-2", "ap-south-1", "ap-southeast-1", "ap-southeast-2", "ca-central-1", "eu-central-1", "eu-north-1", "eu-west-1", "eu-west-2", "eu-west-3", "me-south-1", "sa-east-1", "us-east-1", "us-east-2", "us-west-1", "us-west-2"$`, + expectedError: `^platform\.aws\.region: Required value: region must be specified$`, }, { name: "valid libvirt platform", diff --git a/pkg/types/vsphere/platform.go b/pkg/types/vsphere/platform.go index a9b5c1c684b..f2510637772 100644 --- a/pkg/types/vsphere/platform.go +++ b/pkg/types/vsphere/platform.go @@ -33,9 +33,6 @@ type Platform struct { // IngressVIP is the virtual IP address for ingress IngressVIP string `json:"ingressVIP,omitempty"` - // DNSVIP is the virtual IP address for DNS - DNSVIP string `json:"dnsVIP,omitempty"` - // DefaultMachinePlatform is the default configuration used when // installing on VSphere for machine pools which do not define their own // platform configuration. 
diff --git a/pkg/types/vsphere/validation/platform.go b/pkg/types/vsphere/validation/platform.go index c4ee5791beb..a9f03cc0ba5 100644 --- a/pkg/types/vsphere/validation/platform.go +++ b/pkg/types/vsphere/validation/platform.go @@ -29,7 +29,7 @@ func ValidatePlatform(p *vsphere.Platform, fldPath *field.Path) field.ErrorList } // If all VIPs are empty, skip IP validation. All VIPs are required to be defined together. - if strings.Join([]string{p.APIVIP, p.IngressVIP, p.DNSVIP}, "") != "" { + if strings.Join([]string{p.APIVIP, p.IngressVIP}, "") != "" { allErrs = append(allErrs, validateVIPs(p, fldPath)...) } @@ -69,11 +69,5 @@ func validateVIPs(p *vsphere.Platform, fldPath *field.Path) field.ErrorList { allErrs = append(allErrs, field.Invalid(fldPath.Child("ingressVIP"), p.IngressVIP, err.Error())) } - if len(p.DNSVIP) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("dnsVIP"), "must specify a VIP for DNS")) - } else if err := validate.IP(p.DNSVIP); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("dnsVIP"), p.DNSVIP, err.Error())) - } - return allErrs } diff --git a/pkg/types/vsphere/validation/platform_test.go b/pkg/types/vsphere/validation/platform_test.go index a65aed1d99f..7fc80d89198 100644 --- a/pkg/types/vsphere/validation/platform_test.go +++ b/pkg/types/vsphere/validation/platform_test.go @@ -80,7 +80,6 @@ func TestValidatePlatform(t *testing.T) { p := validPlatform() p.APIVIP = "192.168.111.2" p.IngressVIP = "192.168.111.3" - p.DNSVIP = "192.168.111.4" return p }(), // expectedError: `^test-path\.apiVIP: Invalid value: "": "" is not a valid IP`, @@ -91,7 +90,6 @@ func TestValidatePlatform(t *testing.T) { p := validPlatform() p.APIVIP = "" p.IngressVIP = "192.168.111.3" - p.DNSVIP = "192.168.111.4" return p }(), expectedError: `^test-path\.apiVIP: Required value: must specify a VIP for the API`, @@ -102,29 +100,16 @@ func TestValidatePlatform(t *testing.T) { p := validPlatform() p.APIVIP = "192.168.111.2" p.IngressVIP = 
"" - p.DNSVIP = "192.168.111.4" return p }(), expectedError: `^test-path\.ingressVIP: Required value: must specify a VIP for Ingress`, }, - { - name: "missing DNS VIP", - platform: func() *vsphere.Platform { - p := validPlatform() - p.APIVIP = "192.168.111.2" - p.IngressVIP = "192.168.111.3" - p.DNSVIP = "" - return p - }(), - expectedError: `^test-path\.dnsVIP: Required value: must specify a VIP for DNS`, - }, { name: "Invalid API VIP", platform: func() *vsphere.Platform { p := validPlatform() p.APIVIP = "192.168.111" p.IngressVIP = "192.168.111.2" - p.DNSVIP = "192.168.111.3" return p }(), expectedError: `^test-path.apiVIP: Invalid value: "192.168.111": "192.168.111" is not a valid IP`, @@ -135,22 +120,10 @@ func TestValidatePlatform(t *testing.T) { p := validPlatform() p.APIVIP = "192.168.111.1" p.IngressVIP = "192.168.111" - p.DNSVIP = "192.168.111.3" return p }(), expectedError: `^test-path.ingressVIP: Invalid value: "192.168.111": "192.168.111" is not a valid IP`, }, - { - name: "Invalid DNS VIP", - platform: func() *vsphere.Platform { - p := validPlatform() - p.APIVIP = "192.168.111.2" - p.IngressVIP = "192.168.111.3" - p.DNSVIP = "192.168.111" - return p - }(), - expectedError: `^test-path.dnsVIP: Invalid value: "192.168.111": "192.168.111" is not a valid IP`, - }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { diff --git a/platformtests/aws/README.md b/platformtests/aws/README.md deleted file mode 100644 index e090126a376..00000000000 --- a/platformtests/aws/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# AWS Tests - -This directory contains test suites checking AWS-specific assumptions. -Run with: - -```console -$ AWS_PROFILE=your-profile go test . -``` - -or similar (it needs access to [your AWS credentials][credentials]). 
- -[credentials]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html diff --git a/platformtests/aws/default_instance_class_test.go b/platformtests/aws/default_instance_class_test.go deleted file mode 100644 index 365646ead84..00000000000 --- a/platformtests/aws/default_instance_class_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/pricing" - awsutil "github.com/openshift/installer/pkg/asset/installconfig/aws" - "github.com/openshift/installer/pkg/types/aws/defaults" - "github.com/openshift/installer/pkg/types/aws/validation" - "github.com/stretchr/testify/assert" -) - -func TestGetDefaultInstanceClass(t *testing.T) { - preferredInstanceClasses := []string{"m4", "m5"} // decreasing precedence - - ssn, err := awsutil.GetSession() - if err != nil { - t.Fatal(err) - } - - exists := struct{}{} - pricingInstanceClasses := map[string]map[string]struct{}{} - - pricingClient := pricing.New(ssn, aws.NewConfig().WithRegion("us-east-1")) - err = pricingClient.GetProductsPages( - &pricing.GetProductsInput{ - ServiceCode: aws.String("AmazonEC2"), - Filters: []*pricing.Filter{ - { - Field: aws.String("tenancy"), - Type: aws.String("TERM_MATCH"), - Value: aws.String("Shared"), - }, - { - Field: aws.String("productFamily"), - Type: aws.String("TERM_MATCH"), - Value: aws.String("Compute Instance"), - }, - { - Field: aws.String("operatingSystem"), - Type: aws.String("TERM_MATCH"), - Value: aws.String("Linux"), - }, - { - Field: aws.String("instanceFamily"), - Type: aws.String("TERM_MATCH"), - Value: aws.String("General purpose"), - }, - }, - }, - func(result *pricing.GetProductsOutput, lastPage bool) bool { - for _, priceList := range result.PriceList { - product := priceList["product"].(map[string]interface{}) - attr := product["attributes"].(map[string]interface{}) - location := 
attr["location"].(string) - instanceType := attr["instanceType"].(string) - instanceClassSlice := strings.Split(instanceType, ".") - instanceClass := instanceClassSlice[0] - _, ok := pricingInstanceClasses[location] - if ok { - pricingInstanceClasses[location][instanceClass] = exists - } else { - pricingInstanceClasses[location] = map[string]struct{}{instanceClass: exists} - } - } - return !lastPage - }, - ) - if err != nil { - t.Fatal(err) - } - - regions := map[string]string{ // seed with locations that don't match AWS's usual names - "AWS GovCloud (US)": "us-gov-west-1", - "AWS GovCloud (US-East)": "us-gov-east-1", - "Asia Pacific (Hong Kong)": "ap-east-1", - "Asia Pacific (Osaka-Local)": "ap-northeast-3", - "EU (Stockholm)": "eu-north-1", - "Middle East (Bahrain)": "me-south-1", - "South America (Sao Paulo)": "sa-east-1", - } - - for location, classes := range pricingInstanceClasses { - t.Run(location, func(t *testing.T) { - region, ok := regions[location] - if !ok { - for slug, name := range validation.Regions { - if strings.Contains(location, name) { - regions[location] = slug - region = slug - break - } - } - if region == "" { - t.Fatal("not a recognized region") - } - } - - ec2Client := ec2.New(ssn, aws.NewConfig().WithRegion(region)) - zonesResponse, err := ec2Client.DescribeAvailabilityZones(nil) - if err != nil { - t.Logf("no direct access to region, assuming full support: %v", err) - - var match string - for _, instanceClass := range preferredInstanceClasses { - if _, ok := classes[instanceClass]; ok { - match = instanceClass - break - } - } - - if match == "" { - t.Fatalf("none of the preferred instance classes are priced: %v", classes) - } - - t.Log(classes) - assert.Equal(t, defaults.InstanceClass(region), match) - return - } - - zones := make(map[string]struct{}, len(zonesResponse.AvailabilityZones)) - for _, zone := range zonesResponse.AvailabilityZones { - zones[*zone.ZoneName] = exists - } - - available := make(map[string]map[string]struct{}, 
len(preferredInstanceClasses)) - var allowed []string - - for _, instanceClass := range preferredInstanceClasses { - if _, ok := classes[instanceClass]; !ok { - t.Logf("skip the unpriced %s", instanceClass) - continue - } - - available[instanceClass] = make(map[string]struct{}, len(zones)) - exampleInstanceType := fmt.Sprintf("%s.large", instanceClass) - err := ec2Client.DescribeReservedInstancesOfferingsPages( - &ec2.DescribeReservedInstancesOfferingsInput{ - Filters: []*ec2.Filter{ - {Name: aws.String("scope"), Values: []*string{aws.String("Availability Zone")}}, - }, - InstanceTenancy: aws.String("default"), - InstanceType: &exampleInstanceType, - ProductDescription: aws.String("Linux/UNIX"), - }, - func(results *ec2.DescribeReservedInstancesOfferingsOutput, lastPage bool) bool { - for _, offering := range results.ReservedInstancesOfferings { - if offering.AvailabilityZone == nil { - continue - } - - available[instanceClass][*offering.AvailabilityZone] = exists - } - - return !lastPage - }, - ) - if err != nil { - t.Fatal(err) - } - - if reflect.DeepEqual(available[instanceClass], zones) { - allowed = append(allowed, instanceClass) - } - } - - if len(allowed) == 0 { - t.Fatalf("none of the preferred instance classes are fully supported: %v", available) - } - - t.Log(available) - assert.Contains(t, allowed, defaults.InstanceClass(region)) - }) - } -} diff --git a/upi/aws/cloudformation/03_cluster_security.yaml b/upi/aws/cloudformation/03_cluster_security.yaml index 792238838e2..2d69e18127d 100644 --- a/upi/aws/cloudformation/03_cluster_security.yaml +++ b/upi/aws/cloudformation/03_cluster_security.yaml @@ -392,16 +392,56 @@ Resources: Version: "2012-10-17" Statement: - Effect: "Allow" - Action: "ec2:*" - Resource: "*" - - Effect: "Allow" - Action: "elasticloadbalancing:*" - Resource: "*" - - Effect: "Allow" - Action: "iam:PassRole" - Resource: "*" - - Effect: "Allow" - Action: "s3:GetObject" + Action: + - "ec2:DescribeInstances" + - "ec2:DescribeRegions" + - 
"ec2:DescribeRouteTables" + - "ec2:DescribeSecurityGroups" + - "ec2:DescribeSubnets" + - "ec2:DescribeVolumes" + - "ec2:CreateSecurityGroup" + - "ec2:CreateTags" + - "ec2:CreateVolume" + - "ec2:ModifyInstanceAttribute" + - "ec2:ModifyVolume" + - "ec2:AttachVolume" + - "ec2:AuthorizeSecurityGroupIngress" + - "ec2:DeleteSecurityGroup" + - "ec2:DeleteVolume" + - "ec2:DetachVolume" + - "ec2:RevokeSecurityGroupIngress" + - "ec2:DescribeVpcs" + - "elasticloadbalancing:AddTags" + - "elasticloadbalancing:AttachLoadBalancerToSubnets" + - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer" + - "elasticloadbalancing:CreateLoadBalancer" + - "elasticloadbalancing:CreateLoadBalancerPolicy" + - "elasticloadbalancing:CreateLoadBalancerListeners" + - "elasticloadbalancing:ConfigureHealthCheck" + - "elasticloadbalancing:DeleteLoadBalancer" + - "elasticloadbalancing:DeleteLoadBalancerListeners" + - "elasticloadbalancing:DescribeLoadBalancers" + - "elasticloadbalancing:DescribeLoadBalancerAttributes" + - "elasticloadbalancing:DetachLoadBalancerFromSubnets" + - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer" + - "elasticloadbalancing:ModifyLoadBalancerAttributes" + - "elasticloadbalancing:RegisterInstancesWithLoadBalancer" + - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer" + - "elasticloadbalancing:AddTags" + - "elasticloadbalancing:CreateListener" + - "elasticloadbalancing:CreateTargetGroup" + - "elasticloadbalancing:DeleteListener" + - "elasticloadbalancing:DeleteTargetGroup" + - "elasticloadbalancing:DescribeListeners" + - "elasticloadbalancing:DescribeLoadBalancerPolicies" + - "elasticloadbalancing:DescribeTargetGroups" + - "elasticloadbalancing:DescribeTargetHealth" + - "elasticloadbalancing:ModifyListener" + - "elasticloadbalancing:ModifyTargetGroup" + - "elasticloadbalancing:RegisterTargets" + - "elasticloadbalancing:DeregisterTargets" + - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + - "kms:DescribeKey" Resource: "*" 
MasterInstanceProfile: @@ -428,7 +468,9 @@ Resources: Version: "2012-10-17" Statement: - Effect: "Allow" - Action: "ec2:Describe*" + Action: + - "ec2:DescribeInstances" + - "ec2:DescribeRegions" Resource: "*" WorkerInstanceProfile: diff --git a/upi/metal/bootstrap/matchbox.tf b/upi/metal/bootstrap/matchbox.tf index 9d8ae8e16ad..f2f26e20bcb 100644 --- a/upi/metal/bootstrap/matchbox.tf +++ b/upi/metal/bootstrap/matchbox.tf @@ -1,25 +1,25 @@ resource "matchbox_profile" "bootstrap" { name = "${var.cluster_id}-bootstrap" - kernel = "${var.pxe_kernel}" + kernel = var.pxe_kernel initrd = [ - "${var.pxe_initrd}", + var.pxe_initrd, ] - args = [ - "${var.pxe_kernel_args}", - "coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?cluster_id=${var.cluster_id}&role=bootstrap", - ] + args = concat( + var.pxe_kernel_args, + ["coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?cluster_id=${var.cluster_id}&role=bootstrap"], + ) - raw_ignition = "${var.igntion_config_content}" + raw_ignition = var.igntion_config_content } resource "matchbox_group" "bootstrap" { name = "${var.cluster_id}-bootstrap" - profile = "${matchbox_profile.bootstrap.name}" + profile = matchbox_profile.bootstrap.name - selector { - cluster_id = "${var.cluster_id}" + selector = { + cluster_id = var.cluster_id role = "bootstrap" } } diff --git a/upi/metal/bootstrap/outputs.tf b/upi/metal/bootstrap/outputs.tf index 1d08a6a0fb9..a875c322eb8 100644 --- a/upi/metal/bootstrap/outputs.tf +++ b/upi/metal/bootstrap/outputs.tf @@ -1,11 +1,11 @@ output "device_ip" { - value = "${packet_device.bootstrap.network.0.address}" + value = packet_device.bootstrap.network[0].address } output "device_hostname" { - value = "${packet_device.bootstrap.hostname}" + value = packet_device.bootstrap.hostname } output "device_id" { - value = "${packet_device.bootstrap.id}" + value = packet_device.bootstrap.id } diff --git a/upi/metal/bootstrap/packet.tf b/upi/metal/bootstrap/packet.tf index e779b832cad..5f985988264 
100644 --- a/upi/metal/bootstrap/packet.tf +++ b/upi/metal/bootstrap/packet.tf @@ -1,11 +1,11 @@ resource "packet_device" "bootstrap" { hostname = "${var.cluster_id}-bootstrap" plan = "c1.small.x86" - facilities = ["${var.packet_facility}"] + facilities = [var.packet_facility] operating_system = "custom_ipxe" ipxe_script_url = "${var.matchbox_http_endpoint}/ipxe?cluster_id=${var.cluster_id}&role=bootstrap" billing_cycle = "hourly" - project_id = "${var.packet_project_id}" + project_id = var.packet_project_id - depends_on = ["matchbox_group.bootstrap"] + depends_on = [matchbox_group.bootstrap] } diff --git a/upi/metal/bootstrap/variables.tf b/upi/metal/bootstrap/variables.tf index eb131e3ca65..bb5848d8883 100644 --- a/upi/metal/bootstrap/variables.tf +++ b/upi/metal/bootstrap/variables.tf @@ -1,31 +1,31 @@ variable "pxe_kernel" { - type = "string" + type = string } variable "pxe_initrd" { - type = "string" + type = string } variable "pxe_kernel_args" { - type = "list" + type = list(string) } variable "matchbox_http_endpoint" { - type = "string" + type = string } variable "cluster_id" { - type = "string" + type = string } variable "igntion_config_content" { - type = "string" + type = string } variable "packet_facility" { - type = "string" + type = string } variable "packet_project_id" { - type = "string" + type = string } diff --git a/upi/metal/config.tf b/upi/metal/config.tf index 168f9bfa70d..b6435fdb8c0 100644 --- a/upi/metal/config.tf +++ b/upi/metal/config.tf @@ -1,51 +1,56 @@ # ================COMMON===================== variable "cluster_id" { - type = "string" + type = string description = < 0 ? 
1 : 0 + + zone_id = data.aws_route53_zone.public.zone_id type = "A" ttl = "60" name = "*.apps.${var.cluster_domain}" - records = ["${local.worker_public_ipv4}"] + records = local.worker_public_ipv4 } resource "aws_route53_record" "etcd_a_nodes" { - count = "${var.master_count}" - zone_id = "${data.aws_route53_zone.public.zone_id}" + count = var.master_count + zone_id = data.aws_route53_zone.public.zone_id type = "A" ttl = "60" name = "etcd-${count.index}.${var.cluster_domain}" - records = ["${local.master_public_ipv4[count.index]}"] + records = [local.master_public_ipv4[count.index]] } resource "aws_route53_record" "master_a_nodes" { - count = "${var.master_count}" - zone_id = "${data.aws_route53_zone.public.zone_id}" + count = var.master_count + zone_id = data.aws_route53_zone.public.zone_id type = "A" ttl = "60" name = "master-${count.index}.${var.cluster_domain}" - records = ["${local.master_public_ipv4[count.index]}"] + records = [local.master_public_ipv4[count.index]] } resource "aws_route53_record" "worker_a_nodes" { - count = "${var.worker_count}" - zone_id = "${data.aws_route53_zone.public.zone_id}" + count = var.worker_count + zone_id = data.aws_route53_zone.public.zone_id type = "A" ttl = "60" name = "worker-${count.index}.${var.cluster_domain}" - records = ["${local.worker_public_ipv4[count.index]}"] + records = [local.worker_public_ipv4[count.index]] } resource "aws_route53_record" "etcd_cluster" { - zone_id = "${data.aws_route53_zone.public.zone_id}" + zone_id = data.aws_route53_zone.public.zone_id type = "SRV" ttl = "60" name = "_etcd-server-ssl._tcp.${var.cluster_domain}" - records = ["${formatlist("0 10 2380 %s", aws_route53_record.etcd_a_nodes.*.fqdn)}"] + records = formatlist("0 10 2380 %s", aws_route53_record.etcd_a_nodes.*.fqdn) } diff --git a/upi/metal/outputs.tf b/upi/metal/outputs.tf index 9564021bfab..4f1814b59e0 100644 --- a/upi/metal/outputs.tf +++ b/upi/metal/outputs.tf @@ -1,11 +1,11 @@ output "master_ips" { - value = 
["${local.master_public_ipv4}"] + value = local.master_public_ipv4 } output "worker_ips" { - value = ["${local.worker_public_ipv4}"] + value = local.worker_public_ipv4 } output "bootstrap_ip" { - value = "${module.bootstrap.device_ip}" + value = module.bootstrap.device_ip } diff --git a/upi/openstack/README.md b/upi/openstack/README.md index 9b46ebcd0e0..2127921d8ea 100644 --- a/upi/openstack/README.md +++ b/upi/openstack/README.md @@ -13,13 +13,13 @@ The playbooks in this directory are designed to reproduce an IPI installation, b Every step can be run like this: ```shell -(venv)$ ansible-playbook -i inventory.yaml 01_network.yaml +(venv)$ ansible-playbook -i inventory.yaml network.yaml ``` For every script, a symmetrical teardown playbook is provided: ```shell -(venv)$ ansible-playbook -i inventory.yaml down-01_network.yaml +(venv)$ ansible-playbook -i inventory.yaml down-network.yaml ``` A full teardown can be achieved by running all the `down` scripts in reverse order. diff --git a/upi/openstack/03_bootstrap.yaml b/upi/openstack/bootstrap.yaml similarity index 100% rename from upi/openstack/03_bootstrap.yaml rename to upi/openstack/bootstrap.yaml diff --git a/upi/openstack/common.yaml b/upi/openstack/common.yaml index 4c5d137cb9c..720347c7b47 100644 --- a/upi/openstack/common.yaml +++ b/upi/openstack/common.yaml @@ -23,7 +23,7 @@ # Server names os_bootstrap_server_name: "{{ infraID }}-bootstrap" os_cp_server_name: "{{ infraID }}-master" - os_cp_server_group_name: "{{ infraID }}-master-group" + os_cp_server_group_name: "{{ infraID }}-master" os_compute_server_name: "{{ infraID }}-worker" # Trunk names os_cp_trunk_name: "{{ infraID }}-master-trunk" diff --git a/upi/openstack/05_compute-nodes.yaml b/upi/openstack/compute-nodes.yaml similarity index 100% rename from upi/openstack/05_compute-nodes.yaml rename to upi/openstack/compute-nodes.yaml diff --git a/upi/openstack/04_control-plane.yaml b/upi/openstack/control-plane.yaml similarity index 100% rename from 
upi/openstack/04_control-plane.yaml rename to upi/openstack/control-plane.yaml diff --git a/upi/openstack/down-03_bootstrap.yaml b/upi/openstack/down-bootstrap.yaml similarity index 100% rename from upi/openstack/down-03_bootstrap.yaml rename to upi/openstack/down-bootstrap.yaml diff --git a/upi/openstack/down-05_compute-nodes.yaml b/upi/openstack/down-compute-nodes.yaml similarity index 100% rename from upi/openstack/down-05_compute-nodes.yaml rename to upi/openstack/down-compute-nodes.yaml diff --git a/upi/openstack/down-04_control-plane.yaml b/upi/openstack/down-control-plane.yaml similarity index 100% rename from upi/openstack/down-04_control-plane.yaml rename to upi/openstack/down-control-plane.yaml diff --git a/upi/openstack/down-06_load-balancers.yaml b/upi/openstack/down-load-balancers.yaml similarity index 87% rename from upi/openstack/down-06_load-balancers.yaml rename to upi/openstack/down-load-balancers.yaml index 0d10330c8fa..dc702ebbb81 100644 --- a/upi/openstack/down-06_load-balancers.yaml +++ b/upi/openstack/down-load-balancers.yaml @@ -1,6 +1,7 @@ # Required Python packages: # # ansible +# openstackcli # openstacksdk - import_playbook: common.yaml @@ -9,7 +10,7 @@ gather_facts: no tasks: - - name: 'Get a token for creating the server group' + - name: 'Get an auth token' os_auth: register: cloud when: os_networking_type == "Kuryr" @@ -26,7 +27,7 @@ - set_fact: versions: "{{ octavia_versions.json.versions | selectattr('id', 'match', 'v2.5') | map(attribute='id') | list }}" - when: os_networking_type == "Kuryr" + when: os_networking_type == "Kuryr" - name: 'List tagged loadbalancers' uri: @@ -43,14 +44,13 @@ # for each service present on the cluster. Let's make # sure to remove the resources generated. 
- name: 'Remove the cluster load balancers' - os_loadbalancer: - name: "{{ item.name }}" - state: absent - wait: no + command: + cmd: "openstack loadbalancer delete --cascade {{ item.id }}" with_items: "{{ lbs_tagged.json.loadbalancers }}" when: - os_networking_type == "Kuryr" - versions | length > 0 + - '"PENDING" not in item.provisioning_status' - name: 'List loadbalancers tagged on description' uri: @@ -67,10 +67,10 @@ # for each service present on the cluster. Let's make # sure to remove the resources generated. - name: 'Remove the cluster load balancers' - os_loadbalancer: - name: "{{ item.name }}" - state: absent + command: + cmd: "openstack loadbalancer delete --cascade {{ item.id }}" with_items: "{{ lbs_description.json.loadbalancers }}" when: - os_networking_type == "Kuryr" - versions | length == 0 + - '"PENDING" not in item.provisioning_status' diff --git a/upi/openstack/down-02_network.yaml b/upi/openstack/down-network.yaml similarity index 100% rename from upi/openstack/down-02_network.yaml rename to upi/openstack/down-network.yaml diff --git a/upi/openstack/down-01_security-groups.yaml b/upi/openstack/down-security-groups.yaml similarity index 100% rename from upi/openstack/down-01_security-groups.yaml rename to upi/openstack/down-security-groups.yaml diff --git a/upi/openstack/02_network.yaml b/upi/openstack/network.yaml similarity index 100% rename from upi/openstack/02_network.yaml rename to upi/openstack/network.yaml diff --git a/upi/openstack/01_security-groups.yaml b/upi/openstack/security-groups.yaml similarity index 58% rename from upi/openstack/01_security-groups.yaml rename to upi/openstack/security-groups.yaml index dba8dd14dfb..532bac9ed7e 100644 --- a/upi/openstack/01_security-groups.yaml +++ b/upi/openstack/security-groups.yaml @@ -81,15 +81,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: udp - remote_group: "{{ os_sg_master }}" - port_range_min: 4789 - port_range_max: 4789 - - - name: 'Create master-sg rule 
"VXLAN from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: udp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 4789 port_range_max: 4789 @@ -97,15 +89,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: udp - remote_group: "{{ os_sg_master }}" - port_range_min: 6081 - port_range_max: 6081 - - - name: 'Create master-sg rule "Geneve from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: udp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 6081 port_range_max: 6081 @@ -113,15 +97,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: tcp - remote_group: "{{ os_sg_master }}" - port_range_min: 6641 - port_range_max: 6642 - - - name: 'Create master-sg rule "ovndb from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: tcp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 6641 port_range_max: 6642 @@ -129,15 +105,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: tcp - remote_group: "{{ os_sg_master }}" - port_range_min: 9000 - port_range_max: 9999 - - - name: 'Create master-sg rule "master ingress internal from worker (TCP)"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: tcp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 9000 port_range_max: 9999 @@ -145,15 +113,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: udp - remote_group: "{{ os_sg_master }}" - port_range_min: 9000 - port_range_max: 9999 - - - name: 'Create master-sg rule "master ingress internal from worker (UDP)"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: udp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" 
port_range_min: 9000 port_range_max: 9999 @@ -161,15 +121,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: tcp - remote_group: "{{ os_sg_master }}" - port_range_min: 10259 - port_range_max: 10259 - - - name: 'Create master-sg rule "kube scheduler from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: tcp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 10259 port_range_max: 10259 @@ -177,15 +129,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: tcp - remote_group: "{{ os_sg_master }}" - port_range_min: 10257 - port_range_max: 10257 - - - name: 'Create master-sg rule "kube controller manager from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: tcp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 10257 port_range_max: 10257 @@ -193,15 +137,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: tcp - remote_group: "{{ os_sg_master }}" - port_range_min: 10250 - port_range_max: 10250 - - - name: 'Create master-sg rule "master ingress kubelet secure from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: tcp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 10250 port_range_max: 10250 @@ -209,7 +145,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: tcp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 2379 port_range_max: 2380 @@ -217,15 +153,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: tcp - remote_group: "{{ os_sg_master }}" - port_range_min: 30000 - port_range_max: 32767 - - - name: 'Create master-sg rule "master ingress services (TCP) from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: tcp - remote_group: 
"{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 30000 port_range_max: 32767 @@ -233,15 +161,7 @@ os_security_group_rule: security_group: "{{ os_sg_master }}" protocol: udp - remote_group: "{{ os_sg_master }}" - port_range_min: 30000 - port_range_max: 32767 - - - name: 'Create master-sg rule "master ingress services (UDP) from worker"' - os_security_group_rule: - security_group: "{{ os_sg_master }}" - protocol: udp - remote_group: "{{ os_sg_worker }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 30000 port_range_max: 32767 @@ -298,15 +218,7 @@ os_security_group_rule: security_group: "{{ os_sg_worker }}" protocol: udp - remote_group: "{{ os_sg_worker }}" - port_range_min: 4789 - port_range_max: 4789 - - - name: 'Create worker-sg rule "VXLAN from master"' - os_security_group_rule: - security_group: "{{ os_sg_worker }}" - protocol: udp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 4789 port_range_max: 4789 @@ -314,15 +226,7 @@ os_security_group_rule: security_group: "{{ os_sg_worker }}" protocol: udp - remote_group: "{{ os_sg_worker }}" - port_range_min: 6081 - port_range_max: 6081 - - - name: 'Create worker-sg rule "Geneve from master"' - os_security_group_rule: - security_group: "{{ os_sg_worker }}" - protocol: udp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 6081 port_range_max: 6081 @@ -330,15 +234,7 @@ os_security_group_rule: security_group: "{{ os_sg_worker }}" protocol: tcp - remote_group: "{{ os_sg_worker }}" - port_range_min: 9000 - port_range_max: 9999 - - - name: 'Create worker-sg rule "worker ingress internal from master (TCP)"' - os_security_group_rule: - security_group: "{{ os_sg_worker }}" - protocol: tcp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 9000 port_range_max: 9999 @@ -346,31 +242,15 @@ os_security_group_rule: security_group: "{{ os_sg_worker 
}}" protocol: udp - remote_group: "{{ os_sg_worker }}" - port_range_min: 9000 - port_range_max: 9999 - - - name: 'Create worker-sg rule "worker ingress internal from master (UDP)"' - os_security_group_rule: - security_group: "{{ os_sg_worker }}" - protocol: udp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 9000 port_range_max: 9999 - - name: 'Create worker-sg rule "worker ingress kubelet secure"' - os_security_group_rule: - security_group: "{{ os_sg_worker }}" - protocol: tcp - remote_group: "{{ os_sg_worker }}" - port_range_min: 10250 - port_range_max: 10250 - - - name: 'Create worker-sg rule "worker ingress kubelet secure from master"' + - name: 'Create worker-sg rule "worker ingress kubelet insecure"' os_security_group_rule: security_group: "{{ os_sg_worker }}" protocol: tcp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 10250 port_range_max: 10250 @@ -378,15 +258,7 @@ os_security_group_rule: security_group: "{{ os_sg_worker }}" protocol: tcp - remote_group: "{{ os_sg_worker }}" - port_range_min: 30000 - port_range_max: 32767 - - - name: 'Create worker-sg rule "worker ingress services (TCP) from master"' - os_security_group_rule: - security_group: "{{ os_sg_worker }}" - protocol: tcp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 30000 port_range_max: 32767 @@ -394,15 +266,7 @@ os_security_group_rule: security_group: "{{ os_sg_worker }}" protocol: udp - remote_group: "{{ os_sg_worker }}" - port_range_min: 30000 - port_range_max: 32767 - - - name: 'Create worker-sg rule "worker ingress services (UDP) from master"' - os_security_group_rule: - security_group: "{{ os_sg_worker }}" - protocol: udp - remote_group: "{{ os_sg_master }}" + remote_ip_prefix: "{{ os_subnet_range }}" port_range_min: 30000 port_range_max: 32767 diff --git a/upi/vsphere/cluster_domain/main.tf b/upi/vsphere/cluster_domain/main.tf new file mode 
100644 index 00000000000..26814e8150e --- /dev/null +++ b/upi/vsphere/cluster_domain/main.tf @@ -0,0 +1,22 @@ +data "aws_route53_zone" "base" { + name = var.base_domain +} + +resource "aws_route53_zone" "cluster" { + name = var.cluster_domain + force_destroy = true + + tags = { + "Name" = var.cluster_domain + "Platform" = "vSphere" + } +} + +resource "aws_route53_record" "name_server" { + name = var.cluster_domain + type = "NS" + ttl = "300" + zone_id = data.aws_route53_zone.base.zone_id + records = aws_route53_zone.cluster.name_servers +} + diff --git a/upi/vsphere/cluster_domain/outputs.tf b/upi/vsphere/cluster_domain/outputs.tf new file mode 100644 index 00000000000..ef8db09d4ce --- /dev/null +++ b/upi/vsphere/cluster_domain/outputs.tf @@ -0,0 +1,3 @@ +output "zone_id" { + value = aws_route53_zone.cluster.zone_id +} diff --git a/upi/vsphere/cluster_domain/variables.tf b/upi/vsphere/cluster_domain/variables.tf new file mode 100644 index 00000000000..83699e5c09c --- /dev/null +++ b/upi/vsphere/cluster_domain/variables.tf @@ -0,0 +1,9 @@ +variable "cluster_domain" { + description = "The domain for the cluster that all DNS records must belong" + type = string +} + +variable "base_domain" { + description = "The base domain used for public records." 
+ type = string +} diff --git a/upi/vsphere/folder/main.tf b/upi/vsphere/folder/main.tf deleted file mode 100644 index 6f5605846f1..00000000000 --- a/upi/vsphere/folder/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "vsphere_folder" "folder" { - path = "${var.path}" - type = "vm" - datacenter_id = "${var.datacenter_id}" -} diff --git a/upi/vsphere/folder/output.tf b/upi/vsphere/folder/output.tf deleted file mode 100644 index d20b194905c..00000000000 --- a/upi/vsphere/folder/output.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "path" { - value = "${vsphere_folder.folder.path}" -} diff --git a/upi/vsphere/folder/variables.tf b/upi/vsphere/folder/variables.tf deleted file mode 100644 index a02bf0cfbc4..00000000000 --- a/upi/vsphere/folder/variables.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "path" { - type = "string" -} - -variable "datacenter_id" { - type = "string" -} diff --git a/upi/vsphere/host_a_record/main.tf b/upi/vsphere/host_a_record/main.tf new file mode 100644 index 00000000000..8ffa864c06d --- /dev/null +++ b/upi/vsphere/host_a_record/main.tf @@ -0,0 +1,9 @@ +resource "aws_route53_record" "a_record" { + for_each = var.records + + type = "A" + ttl = "60" + zone_id = var.zone_id + name = each.key + records = [each.value] +} diff --git a/upi/vsphere/host_a_record/outputs.tf b/upi/vsphere/host_a_record/outputs.tf new file mode 100644 index 00000000000..091a4028856 --- /dev/null +++ b/upi/vsphere/host_a_record/outputs.tf @@ -0,0 +1,3 @@ +output "fqdns" { + value = values(aws_route53_record.a_record)[*].name +} diff --git a/upi/vsphere/host_a_record/variables.tf b/upi/vsphere/host_a_record/variables.tf new file mode 100644 index 00000000000..710ed1e1fdf --- /dev/null +++ b/upi/vsphere/host_a_record/variables.tf @@ -0,0 +1,9 @@ +variable "zone_id" { + type = string + description = "The ID of the hosted zone to contain this record." 
+} + +variable "records" { + type = map(string) + description = "A records to be added to the zone_id" +} diff --git a/upi/vsphere/machine/cidr_to_ip.sh b/upi/vsphere/ipam/cidr_to_ip.sh similarity index 99% rename from upi/vsphere/machine/cidr_to_ip.sh rename to upi/vsphere/ipam/cidr_to_ip.sh index 9bfb742d7bf..11cd84a3d6a 100755 --- a/upi/vsphere/machine/cidr_to_ip.sh +++ b/upi/vsphere/ipam/cidr_to_ip.sh @@ -1,5 +1,5 @@ #!/bin/bash -# cidr_to_ip - +# cidr_to_ip - # https://www.terraform.io/docs/providers/external/data_source.html # Based on info from here: https://gist.github.com/irvingpop/968464132ded25a206ced835d50afa6b # This script takes requests an IP address from an IPAM server @@ -58,11 +58,11 @@ function produce_output() { # The verification and looping is a crude way of overcoming the lack of # currency safety in the IPAM server. while [[ $SECONDS -lt $timeout ]] - do + do ip_address=$(curl -s "http://$ipam/api/getFreeIP.php?apiapp=address&apitoken=$ipam_token&subnet=${network}&host=${hostname}") if [[ "$(is_ip_address "${ip_address}")" != "true" ]]; then error_exit "could not reserve an IP address: ${ip_address}"; fi - + if [[ "$ip_address" == "$(get_reservation)" ]] then jq -n \ diff --git a/upi/vsphere/ipam/main.tf b/upi/vsphere/ipam/main.tf new file mode 100644 index 00000000000..13501ea961a --- /dev/null +++ b/upi/vsphere/ipam/main.tf @@ -0,0 +1,41 @@ +locals { + network = cidrhost(var.machine_cidr, 0) + hostnames = length(var.static_ip_addresses) == 0 ? var.hostnames : [] + ip_addresses = length(var.static_ip_addresses) == 0 ? 
[for result in null_resource.ip_address : jsondecode(data.http.getip[result.triggers.hostname].body)[result.triggers.hostname]] : var.static_ip_addresses +} + +data "http" "getip" { + for_each = null_resource.ip_address + + url = "http://${var.ipam}/api/getIPs.php?apiapp=address&apitoken=${var.ipam_token}&domain=${null_resource.ip_address[each.key].triggers.hostname}" + + request_headers = { + Accept = "application/json" + } +} + +resource "null_resource" "ip_address" { + for_each = local.hostnames + + triggers = { + ipam = var.ipam + ipam_token = var.ipam_token + network = local.network + hostname = each.key + } + + provisioner "local-exec" { + command = <= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") ) diff --git a/vendor/github.com/openshift/api/route/v1/types.go 
b/vendor/github.com/openshift/api/route/v1/types.go index 9c59fd413e1..6c9d80b98a5 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -34,7 +34,7 @@ type Route struct { Spec RouteSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // status is the current state of the route // +optional - Status RouteStatus `json:"status" protobuf:"bytes,3,opt,name=status"` + Status RouteStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -69,7 +69,7 @@ type RouteSpec struct { // chosen. // Must follow DNS952 subdomain conventions. // +optional - Host string `json:"host" protobuf:"bytes,1,opt,name=host"` + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // subdomain is a DNS subdomain that is requested within the ingress controller's // domain (as a subdomain). If host is set this field is ignored. An ingress // controller may choose to ignore this suggested name, in which case the controller @@ -141,7 +141,7 @@ type RouteStatus struct { // ingress describes the places where the route may be exposed. The list of // ingress points may contain duplicate Host or RouterName values. Routes // are considered live once they are `Ready` - Ingress []RouteIngress `json:"ingress" protobuf:"bytes,1,rep,name=ingress"` + Ingress []RouteIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // RouteIngress holds information about the places where a route is exposed. 
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/common_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/common_types.go index 5ee990c9f21..b49141ac6dc 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/common_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/common_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -30,5 +31,90 @@ type ProviderSpec struct { // versioned API types that should be serialized/deserialized from this // field, akin to component config. // +optional + // +kubebuilder:validation:XPreserveUnknownFields Value *runtime.RawExtension `json:"value,omitempty"` } + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. This is a copy of customizable fields from metav1.ObjectMeta. +// +// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, +// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases +// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies +// the API and some issues that can impact user experience. +// +// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) +// for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, +// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`. +// The investigation showed that `controller-tools@v2` behaves differently than its previous version +// when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package. 
+// +// In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` +// had validation properties, including for `creationTimestamp` (metav1.Time). +// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` +// which breaks validation because the field isn't marked as nullable. +// +// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded +// types. When that happens, this hack should be revisited. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. 
+ // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid"` +} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go index 1de1b1807cf..ff690375605 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go @@ -69,7 +69,7 @@ type MachineSpec struct { // indicate what labels, annotations, name prefix, etc., should be used // when creating the Node. // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta `json:"metadata,omitempty"` // The list of the taints to be applied to the corresponding Node in additive // manner. This list will not overwrite any other taints added to the Node on @@ -154,6 +154,7 @@ type MachineStatus struct { // own versioned API types that should be // serialized/deserialized from this field. // +optional + // +kubebuilder:validation:XPreserveUnknownFields ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. 
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go index 4bb5d831e5e..21ff73e42c3 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go @@ -41,7 +41,8 @@ type MachineHealthCheckList struct { // MachineHealthCheckSpec defines the desired state of MachineHealthCheck type MachineHealthCheckSpec struct { - // Label selector to match machines whose health will be exercised + // Label selector to match machines whose health will be exercised. + // Note: An empty selector will match all machines. Selector metav1.LabelSelector `json:"selector"` // UnhealthyConditions contains a list of the conditions that determine @@ -99,7 +100,6 @@ type MachineHealthCheckStatus struct { ExpectedMachines *int `json:"expectedMachines"` // total number of machines counted by this machine health check - // +kubebuilder:default=0 // +kubebuilder:validation:Minimum=0 CurrentHealthy *int `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` } diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go index e4c0666546c..e0991ea79a0 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go @@ -112,7 +112,7 @@ type MachineTemplateSpec struct { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the machine. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go index e50655f9cc2..f13a4dbb27c 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" ) @@ -449,6 +450,43 @@ func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]metav1.OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. 
+func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { *out = *in diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/addtoscheme_vsphereprovider_v1alpha1.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/addtoscheme_vsphereprovider_v1beta1.go similarity index 70% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/addtoscheme_vsphereprovider_v1alpha1.go rename to vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/addtoscheme_vsphereprovider_v1beta1.go index afc0de9b367..088adfa1aac 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/addtoscheme_vsphereprovider_v1alpha1.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/addtoscheme_vsphereprovider_v1beta1.go @@ -1,10 +1,10 @@ package apis import ( - "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1" + "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" ) func init() { // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) + AddToSchemes = append(AddToSchemes, v1beta1.SchemeBuilder.AddToScheme) } diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/vsphereproviderstatus_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/vsphereproviderstatus_types.go deleted file mode 100644 index 5cacc2153ce..00000000000 --- 
a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/vsphereproviderstatus_types.go +++ /dev/null @@ -1,43 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. -// It contains VSphere-specific status information. -// +k8s:openapi-gen=true -type VSphereMachineProviderStatus struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // TODO: populate what we need here: - // InstanceID is the ID of the instance in VSphere - // +optional - //InstanceID *string `json:"instanceId,omitempty"` - - // InstanceState is the provisioning state of the VSphere Instance. - // +optional - //InstanceState *string `json:"instanceState,omitempty"` - // - // TaskRef? - // Ready? - // Conditions is a set of conditions associated with the Machine to indicate - // errors or other status - // Conditions []VSphereMachineProviderCondition `json:"conditions,omitempty"` - - // TaskRef is a managed object reference to a Task related to the machine. - // This value is set automatically at runtime and should not be set or - // modified by users. 
- // +optional - TaskRef string `json:"taskRef,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -func init() { - SchemeBuilder.Register(&VSphereMachineProviderStatus{}) -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/doc.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/doc.go similarity index 67% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/doc.go rename to vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/doc.go index 2095a0f1ba1..d59eebf3d66 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/doc.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/doc.go @@ -1,7 +1,7 @@ -// Package v1alpha1 contains API Schema definitions for the vsphereprovider v1alpha1 API group +// Package v1beta1 contains API Schema definitions for the vsphereprovider v1beta1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider // +k8s:defaulter-gen=TypeMeta // +groupName=vsphereprovider.machine.openshift.io -package v1alpha1 +package v1beta1 diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/register.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go similarity index 94% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/register.go rename to vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go index 8c58cb1d711..04fb4543fc3 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/register.go +++ 
b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go @@ -1,10 +1,10 @@ -// Package v1alpha1 contains API Schema definitions for the vsphereprovider v1alpha1 API group +// Package v1beta1 contains API Schema definitions for the vsphereprovider v1beta1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider // +k8s:defaulter-gen=TypeMeta // +groupName=vsphereprovider.machine.openshift.io -package v1alpha1 +package v1beta1 import ( "encoding/json" @@ -19,7 +19,7 @@ import ( var ( // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "vsphereprovider.openshift.io", Version: "v1alpha1"} + SchemeGroupVersion = schema.GroupVersion{Group: "vsphereprovider.openshift.io", Version: "v1beta1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/vsphereproviderconfig_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderconfig_types.go similarity index 73% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/vsphereproviderconfig_types.go rename to vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderconfig_types.go index f8ebc442923..68682cd1d3b 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/vsphereproviderconfig_types.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderconfig_types.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1beta1 import ( corev1 "k8s.io/api/core/v1" @@ -52,8 +52,38 @@ type VSphereMachineProviderSpec struct { // machine is 
cloned. // +optional DiskGiB int32 `json:"diskGiB,omitempty"` + // Snapshot is the name of the snapshot from which the VM was cloned + // +optional + Snapshot string `json:"snapshot"` + + // CloneMode specifies the type of clone operation. + // The LinkedClone mode is only support for templates that have at least + // one snapshot. If the template has no snapshots, then CloneMode defaults + // to FullClone. + // When LinkedClone mode is enabled the DiskGiB field is ignored as it is + // not possible to expand disks of linked clones. + // Defaults to LinkedClone, but fails gracefully to FullClone if the source + // of the clone operation has no snapshots. + // +optional + CloneMode CloneMode `json:"cloneMode,omitempty"` } +// CloneMode is the type of clone operation used to clone a VM from a template. +type CloneMode string + +const ( + // FullClone indicates a VM will have no relationship to the source of the + // clone operation once the operation is complete. This is the safest clone + // mode, but it is not the fastest. + FullClone CloneMode = "fullClone" + + // LinkedClone means resulting VMs will be dependent upon the snapshot of + // the source VM/template from which the VM was cloned. This is the fastest + // clone mode, but it also prevents expanding a VMs disk beyond the size of + // the source VM/template. + LinkedClone CloneMode = "linkedClone" +) + // NetworkSpec defines the virtual machine's network configuration. 
type NetworkSpec struct { Devices []NetworkDeviceSpec `json:"devices"` diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderstatus_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderstatus_types.go new file mode 100644 index 00000000000..ccffbb367ea --- /dev/null +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderstatus_types.go @@ -0,0 +1,83 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// VSphereMachineProviderConditionType is a valid value for VSphereMachineProviderCondition.Type. +type VSphereMachineProviderConditionType string + +// Valid conditions for an vSphere machine instance. +const ( + // MachineCreation indicates whether the machine has been created or not. If not, + // it should include a reason and message for the failure. + MachineCreation VSphereMachineProviderConditionType = "MachineCreation" +) + +// VSphereMachineProviderConditionReason is reason for the condition's last transition. +type VSphereMachineProviderConditionReason string + +const ( + // MachineCreationSucceeded indicates machine creation success. + MachineCreationSucceeded VSphereMachineProviderConditionReason = "MachineCreationSucceeded" + // MachineCreationFailed indicates machine creation failure. + MachineCreationFailed VSphereMachineProviderConditionReason = "MachineCreationFailed" +) + +// VSphereMachineProviderCondition is a condition in a VSphereMachineProviderStatus. +type VSphereMachineProviderCondition struct { + // Type is the type of the condition. + Type VSphereMachineProviderConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. 
+ // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason VSphereMachineProviderConditionReason `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains VSphere-specific status information. +// +k8s:openapi-gen=true +type VSphereMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + + // TODO: populate what we need here: + // InstanceID is the ID of the instance in VSphere + // +optional + InstanceID *string `json:"instanceId,omitempty"` + + // InstanceState is the provisioning state of the VSphere Instance. + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // + // TaskRef? + // Ready? + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + Conditions []VSphereMachineProviderCondition `json:"conditions,omitempty"` + + // TaskRef is a managed object reference to a Task related to the machine. + // This value is set automatically at runtime and should not be set or + // modified by users. 
+ // +optional + TaskRef string `json:"taskRef,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +func init() { + SchemeBuilder.Register(&VSphereMachineProviderStatus{}) +} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/zz_generated.deepcopy.go similarity index 79% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/zz_generated.deepcopy.go rename to vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/zz_generated.deepcopy.go index 9d24526c8bd..36ab434bcb8 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ // Code generated by controller-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( v1 "k8s.io/api/core/v1" @@ -61,6 +61,23 @@ func (in *NetworkSpec) DeepCopy() *NetworkSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereMachineProviderCondition) DeepCopyInto(out *VSphereMachineProviderCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderCondition. +func (in *VSphereMachineProviderCondition) DeepCopy() *VSphereMachineProviderCondition { + if in == nil { + return nil + } + out := new(VSphereMachineProviderCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) { *out = *in @@ -106,7 +123,23 @@ func (in *VSphereMachineProviderSpec) DeepCopyObject() runtime.Object { func (in *VSphereMachineProviderStatus) DeepCopyInto(out *VSphereMachineProviderStatus) { *out = *in out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VSphereMachineProviderCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderStatus. diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go index 592d1864361..36a6804364c 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -62,10 +62,11 @@ var armorEndOfLine = []byte("-----") // lineReader wraps a line based reader. It watches for the end of an armor // block and records the expected CRC value. 
type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool } func (l *lineReader) Read(p []byte) (n int, err error) { @@ -87,6 +88,11 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return 0, ArmorCorrupt } + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + if len(line) == 5 && line[0] == '=' { // This is the checksum line var expectedBytes [3]byte @@ -108,6 +114,7 @@ func (l *lineReader) Read(p []byte) (n int, err error) { } l.eof = true + l.crcSet = true return 0, io.EOF } @@ -141,10 +148,8 @@ func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) r.currentCRC = crc24(r.currentCRC, p[:n]) - if err == io.EOF { - if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt } return diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go deleted file mode 100644 index 6e695e4272e..00000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_arm.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build arm,!gccgo,!appengine,!nacl - -package poly1305 - -// poly1305_auth_armv6 is implemented in sum_arm.s -//go:noescape -func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) - -func sum(out *[16]byte, m []byte, key *[32]byte) { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s deleted file mode 100644 index f70b4ac4845..00000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_arm.s +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm,!gccgo,!appengine,!nacl - -#include "textflag.h" - -// This code was translated into a form compatible with 5a from the public -// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. - -DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff -DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 -DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff -DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff -DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff -GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20 - -// Warning: the linker may use R11 to synthesize certain instructions. Please -// take care and verify that no synthetic instructions use it. - -TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0 - // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It - // might look like it's only 60 bytes of space but the final four bytes - // will be written by another function.) We need to skip over four - // bytes of stack because that's saving the value of 'g'. 
- ADD $4, R13, R8 - MOVM.IB [R4-R7], (R8) - MOVM.IA.W (R1), [R2-R5] - MOVW $·poly1305_init_constants_armv6<>(SB), R7 - MOVW R2, R8 - MOVW R2>>26, R9 - MOVW R3>>20, g - MOVW R4>>14, R11 - MOVW R5>>8, R12 - ORR R3<<6, R9, R9 - ORR R4<<12, g, g - ORR R5<<18, R11, R11 - MOVM.IA (R7), [R2-R6] - AND R8, R2, R2 - AND R9, R3, R3 - AND g, R4, R4 - AND R11, R5, R5 - AND R12, R6, R6 - MOVM.IA.W [R2-R6], (R0) - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - MOVM.IA.W [R2-R6], (R0) - MOVM.IA.W (R1), [R2-R5] - MOVM.IA [R2-R6], (R0) - ADD $20, R13, R0 - MOVM.DA (R0), [R4-R7] - RET - -#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ - MOVBU (offset+0)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+0)(Rdst); \ - MOVBU (offset+1)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+1)(Rdst); \ - MOVBU (offset+2)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+2)(Rdst); \ - MOVBU (offset+3)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+3)(Rdst) - -TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0 - // Needs 24 bytes of stack for saved registers and then 88 bytes of - // scratch space after that. We assume that 24 bytes at (R13) have - // already been used: four bytes for the link register saved in the - // prelude of poly1305_auth_armv6, four bytes for saving the value of g - // in that function and 16 bytes of scratch space used around - // poly1305_finish_ext_armv6_skip1. 
- ADD $24, R13, R12 - MOVM.IB [R4-R8, R14], (R12) - MOVW R0, 88(R13) - MOVW R1, 92(R13) - MOVW R2, 96(R13) - MOVW R1, R14 - MOVW R2, R12 - MOVW 56(R0), R8 - WORD $0xe1180008 // TST R8, R8 not working see issue 5921 - EOR R6, R6, R6 - MOVW.EQ $(1<<24), R6 - MOVW R6, 84(R13) - ADD $116, R13, g - MOVM.IA (R0), [R0-R9] - MOVM.IA [R0-R4], (g) - CMP $16, R12 - BLO poly1305_blocks_armv6_done - -poly1305_blocks_armv6_mainloop: - WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 - BEQ poly1305_blocks_armv6_mainloop_aligned - ADD $100, R13, g - MOVW_UNALIGNED(R14, g, R0, 0) - MOVW_UNALIGNED(R14, g, R0, 4) - MOVW_UNALIGNED(R14, g, R0, 8) - MOVW_UNALIGNED(R14, g, R0, 12) - MOVM.IA (g), [R0-R3] - ADD $16, R14 - B poly1305_blocks_armv6_mainloop_loaded - -poly1305_blocks_armv6_mainloop_aligned: - MOVM.IA.W (R14), [R0-R3] - -poly1305_blocks_armv6_mainloop_loaded: - MOVW R0>>26, g - MOVW R1>>20, R11 - MOVW R2>>14, R12 - MOVW R14, 92(R13) - MOVW R3>>8, R4 - ORR R1<<6, g, g - ORR R2<<12, R11, R11 - ORR R3<<18, R12, R12 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, g, g - MOVW 84(R13), R3 - BIC $0xfc000000, R11, R11 - BIC $0xfc000000, R12, R12 - ADD R0, R5, R5 - ADD g, R6, R6 - ORR R3, R4, R4 - ADD R11, R7, R7 - ADD $116, R13, R14 - ADD R12, R8, R8 - ADD R4, R9, R9 - MOVM.IA (R14), [R0-R4] - MULLU R4, R5, (R11, g) - MULLU R3, R5, (R14, R12) - MULALU R3, R6, (R11, g) - MULALU R2, R6, (R14, R12) - MULALU R2, R7, (R11, g) - MULALU R1, R7, (R14, R12) - ADD R4<<2, R4, R4 - ADD R3<<2, R3, R3 - MULALU R1, R8, (R11, g) - MULALU R0, R8, (R14, R12) - MULALU R0, R9, (R11, g) - MULALU R4, R9, (R14, R12) - MOVW g, 76(R13) - MOVW R11, 80(R13) - MOVW R12, 68(R13) - MOVW R14, 72(R13) - MULLU R2, R5, (R11, g) - MULLU R1, R5, (R14, R12) - MULALU R1, R6, (R11, g) - MULALU R0, R6, (R14, R12) - MULALU R0, R7, (R11, g) - MULALU R4, R7, (R14, R12) - ADD R2<<2, R2, R2 - ADD R1<<2, R1, R1 - MULALU R4, R8, (R11, g) - MULALU R3, R8, (R14, R12) - MULALU R3, R9, (R11, g) - MULALU R2, R9, (R14, R12) - MOVW 
g, 60(R13) - MOVW R11, 64(R13) - MOVW R12, 52(R13) - MOVW R14, 56(R13) - MULLU R0, R5, (R11, g) - MULALU R4, R6, (R11, g) - MULALU R3, R7, (R11, g) - MULALU R2, R8, (R11, g) - MULALU R1, R9, (R11, g) - ADD $52, R13, R0 - MOVM.IA (R0), [R0-R7] - MOVW g>>26, R12 - MOVW R4>>26, R14 - ORR R11<<6, R12, R12 - ORR R5<<6, R14, R14 - BIC $0xfc000000, g, g - BIC $0xfc000000, R4, R4 - ADD.S R12, R0, R0 - ADC $0, R1, R1 - ADD.S R14, R6, R6 - ADC $0, R7, R7 - MOVW R0>>26, R12 - MOVW R6>>26, R14 - ORR R1<<6, R12, R12 - ORR R7<<6, R14, R14 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, R6, R6 - ADD R14<<2, R14, R14 - ADD.S R12, R2, R2 - ADC $0, R3, R3 - ADD R14, g, g - MOVW R2>>26, R12 - MOVW g>>26, R14 - ORR R3<<6, R12, R12 - BIC $0xfc000000, g, R5 - BIC $0xfc000000, R2, R7 - ADD R12, R4, R4 - ADD R14, R0, R0 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R8 - ADD R12, R6, R9 - MOVW 96(R13), R12 - MOVW 92(R13), R14 - MOVW R0, R6 - CMP $32, R12 - SUB $16, R12, R12 - MOVW R12, 96(R13) - BHS poly1305_blocks_armv6_mainloop - -poly1305_blocks_armv6_done: - MOVW 88(R13), R12 - MOVW R5, 20(R12) - MOVW R6, 24(R12) - MOVW R7, 28(R12) - MOVW R8, 32(R12) - MOVW R9, 36(R12) - ADD $48, R13, R0 - MOVM.DA (R0), [R4-R8, R14] - RET - -#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst); \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst) - -#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) - -// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key) -TEXT ·poly1305_auth_armv6(SB), $196-16 - // The value 196, just above, is the sum of 64 (the size of the context - // structure) and 132 (the amount of stack needed). - // - // At this point, the stack pointer (R13) has been moved down. It - // points to the saved link register and there's 196 bytes of free - // space above it. 
- // - // The stack for this function looks like: - // - // +--------------------- - // | - // | 64 bytes of context structure - // | - // +--------------------- - // | - // | 112 bytes for poly1305_blocks_armv6 - // | - // +--------------------- - // | 16 bytes of final block, constructed at - // | poly1305_finish_ext_armv6_skip8 - // +--------------------- - // | four bytes of saved 'g' - // +--------------------- - // | lr, saved by prelude <- R13 points here - // +--------------------- - MOVW g, 4(R13) - - MOVW out+0(FP), R4 - MOVW m+4(FP), R5 - MOVW mlen+8(FP), R6 - MOVW key+12(FP), R7 - - ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112 - MOVW R7, R1 - - // poly1305_init_ext_armv6 will write to the stack from R13+4, but - // that's ok because none of the other values have been written yet. - BL poly1305_init_ext_armv6<>(SB) - BIC.S $15, R6, R2 - BEQ poly1305_auth_armv6_noblocks - ADD $136, R13, R0 - MOVW R5, R1 - ADD R2, R5, R5 - SUB R2, R6, R6 - BL poly1305_blocks_armv6<>(SB) - -poly1305_auth_armv6_noblocks: - ADD $136, R13, R0 - MOVW R5, R1 - MOVW R6, R2 - MOVW R4, R3 - - MOVW R0, R5 - MOVW R1, R6 - MOVW R2, R7 - MOVW R3, R8 - AND.S R2, R2, R2 - BEQ poly1305_finish_ext_armv6_noremaining - EOR R0, R0 - ADD $8, R13, R9 // 8 = offset to 16 byte scratch space - MOVW R0, (R9) - MOVW R0, 4(R9) - MOVW R0, 8(R9) - MOVW R0, 12(R9) - WORD $0xe3110003 // TST R1, #3 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_aligned - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8 - MOVWP_UNALIGNED(R1, R9, g) - MOVWP_UNALIGNED(R1, R9, g) - -poly1305_finish_ext_armv6_skip8: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4 - MOVWP_UNALIGNED(R1, R9, g) - -poly1305_finish_ext_armv6_skip4: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHUP_UNALIGNED(R1, R9, g) - B poly1305_finish_ext_armv6_skip2 - 
-poly1305_finish_ext_armv6_aligned: - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8_aligned - MOVM.IA.W (R1), [g-R11] - MOVM.IA.W [g-R11], (R9) - -poly1305_finish_ext_armv6_skip8_aligned: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4_aligned - MOVW.P 4(R1), g - MOVW.P g, 4(R9) - -poly1305_finish_ext_armv6_skip4_aligned: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHU.P 2(R1), g - MOVH.P g, 2(R9) - -poly1305_finish_ext_armv6_skip2: - WORD $0xe3120001 // TST $1, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip1 - MOVBU.P 1(R1), g - MOVBU.P g, 1(R9) - -poly1305_finish_ext_armv6_skip1: - MOVW $1, R11 - MOVBU R11, 0(R9) - MOVW R11, 56(R5) - MOVW R5, R0 - ADD $8, R13, R1 - MOVW $16, R2 - BL poly1305_blocks_armv6<>(SB) - -poly1305_finish_ext_armv6_noremaining: - MOVW 20(R5), R0 - MOVW 24(R5), R1 - MOVW 28(R5), R2 - MOVW 32(R5), R3 - MOVW 36(R5), R4 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R4 - ADD R12<<2, R12, R12 - ADD R12, R0, R0 - MOVW R0>>26, R12 - BIC $0xfc000000, R0, R0 - ADD R12, R1, R1 - MOVW R1>>26, R12 - BIC $0xfc000000, R1, R1 - ADD R12, R2, R2 - MOVW R2>>26, R12 - BIC $0xfc000000, R2, R2 - ADD R12, R3, R3 - MOVW R3>>26, R12 - BIC $0xfc000000, R3, R3 - ADD R12, R4, R4 - ADD $5, R0, R6 - MOVW R6>>26, R12 - BIC $0xfc000000, R6, R6 - ADD R12, R1, R7 - MOVW R7>>26, R12 - BIC $0xfc000000, R7, R7 - ADD R12, R2, g - MOVW g>>26, R12 - BIC $0xfc000000, g, g - ADD R12, R3, R11 - MOVW $-(1<<26), R12 - ADD R11>>26, R12, R12 - BIC $0xfc000000, R11, R11 - ADD R12, R4, R9 - MOVW R9>>31, R12 - SUB $1, R12 - AND R12, R6, R6 - AND R12, R7, R7 - AND R12, g, g - AND R12, R11, R11 - AND R12, R9, R9 - MVN R12, R12 - AND R12, R0, R0 - AND R12, R1, R1 - AND R12, R2, R2 - AND R12, R3, R3 - AND R12, R4, R4 - ORR R6, R0, R0 - ORR R7, R1, R1 - ORR g, R2, R2 - ORR R11, R3, R3 - ORR R9, R4, R4 - ORR R1<<26, R0, R0 
- MOVW R1>>6, R1 - ORR R2<<20, R1, R1 - MOVW R2>>12, R2 - ORR R3<<14, R2, R2 - MOVW R3>>18, R3 - ORR R4<<8, R3, R3 - MOVW 40(R5), R6 - MOVW 44(R5), R7 - MOVW 48(R5), g - MOVW 52(R5), R11 - ADD.S R6, R0, R0 - ADC.S R7, R1, R1 - ADC.S g, R2, R2 - ADC.S R11, R3, R3 - MOVM.IA [R0-R3], (R8) - MOVW R5, R12 - EOR R0, R0, R0 - EOR R1, R1, R1 - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - EOR R7, R7, R7 - MOVM.IA.W [R0-R7], (R12) - MOVM.IA [R0-R7], (R12) - MOVW 4(R13), g - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go index 1682eda45f1..32a9cef6bbf 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl +// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl package poly1305 diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go new file mode 100644 index 00000000000..af81d266546 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD. +// +// See https://flak.tedunangst.com/post/bcrypt-pbkdf and +// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c. 
+package bcrypt_pbkdf + +import ( + "crypto/sha512" + "errors" + "golang.org/x/crypto/blowfish" +) + +const blockSize = 32 + +// Key derives a key from the password, salt and rounds count, returning a +// []byte of length keyLen that can be used as cryptographic key. +func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { + if rounds < 1 { + return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") + } + if len(password) == 0 { + return nil, errors.New("bcrypt_pbkdf: empty password") + } + if len(salt) == 0 || len(salt) > 1<<20 { + return nil, errors.New("bcrypt_pbkdf: bad salt length") + } + if keyLen > 1024 { + return nil, errors.New("bcrypt_pbkdf: keyLen is too large") + } + + numBlocks := (keyLen + blockSize - 1) / blockSize + key := make([]byte, numBlocks*blockSize) + + h := sha512.New() + h.Write(password) + shapass := h.Sum(nil) + + shasalt := make([]byte, 0, sha512.Size) + cnt, tmp := make([]byte, 4), make([]byte, blockSize) + for block := 1; block <= numBlocks; block++ { + h.Reset() + h.Write(salt) + cnt[0] = byte(block >> 24) + cnt[1] = byte(block >> 16) + cnt[2] = byte(block >> 8) + cnt[3] = byte(block) + h.Write(cnt) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + + out := make([]byte, blockSize) + copy(out, tmp) + for i := 2; i <= rounds; i++ { + h.Reset() + h.Write(tmp) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + for j := 0; j < len(out); j++ { + out[j] ^= tmp[j] + } + } + + for i, v := range out { + key[i*numBlocks+(block-1)] = v + } + } + return key[:keyLen], nil +} + +var magic = []byte("OxychromaticBlowfishSwatDynamite") + +func bcryptHash(out, shapass, shasalt []byte) { + c, err := blowfish.NewSaltedCipher(shapass, shasalt) + if err != nil { + panic(err) + } + for i := 0; i < 64; i++ { + blowfish.ExpandKey(shasalt, c) + blowfish.ExpandKey(shapass, c) + } + copy(out, magic) + for i := 0; i < 32; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(out[i:i+8], out[i:i+8]) + } + } + // Swap bytes due to different endianness. 
+ for i := 0; i < 32; i += 4 { + out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] + } +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index c148ad4c4f3..06f537c135a 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -7,6 +7,8 @@ package ssh import ( "bytes" "crypto" + "crypto/aes" + "crypto/cipher" "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" @@ -25,6 +27,7 @@ import ( "strings" "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) // These constants represent the algorithm names for key types supported by this @@ -559,9 +562,11 @@ func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { return nil, nil, err } - key := ed25519.PublicKey(w.KeyBytes) + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } - return (ed25519PublicKey)(key), w.Rest, nil + return ed25519PublicKey(w.KeyBytes), w.Rest, nil } func (k ed25519PublicKey) Marshal() []byte { @@ -579,9 +584,11 @@ func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } + if l := len(k); l != ed25519.PublicKeySize { + return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) + } - edKey := (ed25519.PublicKey)(k) - if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { return errors.New("ssh: signature did not verify") } @@ -835,6 +842,10 @@ func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { return nil, nil, err } + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + key := new(skEd25519PublicKey) key.application = w.Application key.PublicKey = ed25519.PublicKey(w.KeyBytes) @@ 
-859,6 +870,9 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } + if l := len(k.PublicKey); l != ed25519.PublicKeySize { + return fmt.Errorf("invalid size %d for Ed25519 public key", l) + } h := sha256.New() h.Write([]byte(k.application)) @@ -895,8 +909,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { original := Marshal(blob) - edKey := (ed25519.PublicKey)(k.PublicKey) - if ok := ed25519.Verify(edKey, original, edSig.Signature); !ok { + if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { return errors.New("ssh: signature did not verify") } @@ -1048,7 +1061,10 @@ func NewPublicKey(key interface{}) (PublicKey, error) { case *dsa.PublicKey: return (*dsaPublicKey)(key), nil case ed25519.PublicKey: - return (ed25519PublicKey)(key), nil + if l := len(key); l != ed25519.PublicKeySize { + return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) + } + return ed25519PublicKey(key), nil default: return nil, fmt.Errorf("ssh: unsupported key type %T", key) } @@ -1122,21 +1138,25 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { case "DSA PRIVATE KEY": return ParseDSAPrivateKey(block.Bytes) case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes) + return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) default: return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) } } // ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If wrong passphrase, return -// x509.IncorrectPasswordError. +// passphrase from a PEM encoded private key. If the passphrase is wrong, it +// will return x509.IncorrectPasswordError. 
func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { block, _ := pem.Decode(pemBytes) if block == nil { return nil, errors.New("ssh: no key found") } + if block.Type == "OPENSSH PRIVATE KEY" { + return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) + } + if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { return nil, errors.New("ssh: not an encrypted key") } @@ -1193,9 +1213,60 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { }, nil } -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { +func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName != "none" || cipherName != "none" { + return nil, &PassphraseMissingError{} + } + if kdfOpts != "" { + return nil, errors.New("ssh: invalid openssh private key") + } + return privKeyBlock, nil +} + +func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { + return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName == "none" || cipherName == "none" { + return nil, errors.New("ssh: key is not password protected") + } + if kdfName != "bcrypt" { + return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") + } + + var opts struct { + Salt string + Rounds uint32 + } + if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { + return nil, err + } + + k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) + if err != nil { + return nil, err + } + key, iv := k[:32], k[32:] + + if cipherName != "aes256-ctr" { + return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q", cipherName, "aes256-ctr") + } + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + ctr := cipher.NewCTR(c, iv) + 
ctr.XORKeyStream(privKeyBlock, privKeyBlock) + + return privKeyBlock, nil + } +} + +type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) + +// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt +// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used +// as the decrypt function to parse an unencrypted private key. See +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. +func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { const magic = "openssh-key-v1\x00" if len(key) < len(magic) || string(key[:len(magic)]) != magic { return nil, errors.New("ssh: invalid openssh private key format") @@ -1214,9 +1285,22 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { if err := Unmarshal(remaining, &w); err != nil { return nil, err } + if w.NumKeys != 1 { + // We only support single key files, and so does OpenSSH. + // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 + return nil, errors.New("ssh: multi-key files are not supported") + } - if w.KdfName != "none" || w.CipherName != "none" { - return nil, errors.New("ssh: cannot decode encrypted private keys") + privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) + if err != nil { + if err, ok := err.(*PassphraseMissingError); ok { + pub, errPub := ParsePublicKey(w.PubKey) + if errPub != nil { + return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) + } + err.PublicKey = pub + } + return nil, err } pk1 := struct { @@ -1226,15 +1310,13 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { Rest []byte `ssh:"rest"` }{} - if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { - return nil, err - } - - if pk1.Check1 != pk1.Check2 { - return nil, errors.New("ssh: checkint mismatch") + if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { 
+ if w.CipherName != "none" { + return nil, x509.IncorrectPasswordError + } + return nil, errors.New("ssh: malformed OpenSSH key") } - // we only handle ed25519 and rsa keys currently switch pk1.Keytype { case KeyAlgoRSA: // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 @@ -1253,10 +1335,8 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { return nil, err } - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err } pk := &rsa.PrivateKey{ @@ -1291,20 +1371,78 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { return nil, errors.New("ssh: private key unexpected length") } - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err } pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) copy(pk, key.Priv) return &pk, nil + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + key := struct { + Curve string + Pub []byte + D *big.Int + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + var curve elliptic.Curve + switch key.Curve { + case "nistp256": + curve = elliptic.P256() + case "nistp384": + curve = elliptic.P384() + case "nistp521": + curve = elliptic.P521() + default: + return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) + } + + X, Y := elliptic.Unmarshal(curve, key.Pub) + if X == nil || Y == nil { + return nil, errors.New("ssh: failed to unmarshal public key") + } + + if key.D.Cmp(curve.Params().N) >= 0 { + return nil, errors.New("ssh: scalar is out of range") + } + + x, y := curve.ScalarBaseMult(key.D.Bytes()) + if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { + 
return nil, errors.New("ssh: public key does not match private key") + } + + return &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: X, + Y: Y, + }, + D: key.D, + }, nil default: return nil, errors.New("ssh: unhandled key type") } } +func checkOpenSSHKeyPadding(pad []byte) error { + for i, b := range pad { + if int(b) != i+1 { + return errors.New("ssh: padding not as expected") + } + } + return nil +} + // FingerprintLegacyMD5 returns the user presentation of the key's // fingerprint as described by RFC 4716 section 4. func FingerprintLegacyMD5(pubKey PublicKey) string { diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index 2f04ee5b5c2..d1b4fca3a94 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -7,6 +7,7 @@ package terminal import ( "bytes" "io" + "runtime" "strconv" "sync" "unicode/utf8" @@ -939,6 +940,8 @@ func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { // readPasswordLine reads from reader until it finds \n or io.EOF. // The slice returned does not include the \n. // readPasswordLine also ignores any \r it finds. +// Windows uses \r as end of line. So, on Windows, readPasswordLine +// reads until it finds \r and ignores any \n it finds during processing. 
func readPasswordLine(reader io.Reader) ([]byte, error) { var buf [1]byte var ret []byte @@ -947,10 +950,20 @@ func readPasswordLine(reader io.Reader) ([]byte, error) { n, err := reader.Read(buf[:]) if n > 0 { switch buf[0] { + case '\b': + if len(ret) > 0 { + ret = ret[:len(ret)-1] + } case '\n': - return ret, nil + if runtime.GOOS != "windows" { + return ret, nil + } + // otherwise ignore \n case '\r': - // remove \r from passwords on Windows + if runtime.GOOS == "windows" { + return ret, nil + } + // otherwise ignore \r default: ret = append(ret, buf[0]) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 5cfdf8f3f03..f614e9cb607 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -85,8 +85,8 @@ func ReadPassword(fd int) ([]byte, error) { } old := st - st &^= (windows.ENABLE_ECHO_INPUT) - st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT) + st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT) if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { return nil, err } diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod index 44179f3a422..b32309c45fd 100644 --- a/vendor/golang.org/x/lint/go.mod +++ b/vendor/golang.org/x/lint/go.mod @@ -2,4 +2,4 @@ module golang.org/x/lint go 1.11 -require golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f +require golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum index 539c98a94a9..2ad45cae246 100644 --- a/vendor/golang.org/x/lint/go.sum +++ b/vendor/golang.org/x/lint/go.sum @@ -1,8 +1,12 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/lint/lint.go b/vendor/golang.org/x/lint/lint.go index 532a75ad247..7d813e061ad 100644 --- a/vendor/golang.org/x/lint/lint.go +++ b/vendor/golang.org/x/lint/lint.go @@ -839,6 +839,7 @@ var commonMethods = map[string]bool{ "ServeHTTP": true, "String": true, "Write": true, + "Unwrap": true, } // lintFuncDoc examines doc comments on functions and methods. 
diff --git a/vendor/modules.txt b/vendor/modules.txt index c597a3c2d8b..f45e4a3c1cc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,8 +17,6 @@ cloud.google.com/go/storage github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns -github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/resources -github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/subscriptions github.com/Azure/azure-sdk-for-go/services/analysisservices/mgmt/2017-08-01/analysisservices github.com/Azure/azure-sdk-for-go/services/apimanagement/mgmt/2018-01-01/apimanagement github.com/Azure/azure-sdk-for-go/services/appconfiguration/mgmt/2019-10-01/appconfiguration @@ -1003,14 +1001,14 @@ github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/openshift-metal3/terraform-provider-ironic v0.1.9 +# github.com/openshift-metal3/terraform-provider-ironic v0.2.0 github.com/openshift-metal3/terraform-provider-ironic/ironic -# github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible => github.com/openshift/api v0.0.0-20200210091934-a0e53e94816b +# github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible => github.com/openshift/api v0.0.0-20200413201024-c6e8c9b6eb9a github.com/openshift/api/config/v1 github.com/openshift/api/operator/v1 github.com/openshift/api/operator/v1alpha1 github.com/openshift/api/route/v1 -# github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240 +# github.com/openshift/client-go v0.0.0-20200320150128-a906f3d8e723 => github.com/openshift/client-go v0.0.0-20200116152001-92a2713fa240 github.com/openshift/client-go/config/clientset/versioned 
github.com/openshift/client-go/config/clientset/versioned/scheme github.com/openshift/client-go/config/clientset/versioned/typed/config/v1 @@ -1035,13 +1033,13 @@ github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig # github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200128081049-840376ca5c09 github.com/openshift/cluster-api-provider-ovirt/pkg/apis github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1 -# github.com/openshift/library-go v0.0.0-20200210105614-4bf528465627 +# github.com/openshift/library-go v0.0.0-20200324092245-db2a8546af81 github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers -# github.com/openshift/machine-api-operator v0.2.1-0.20200310180732-c63fa2b143f0 +# github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290 github.com/openshift/machine-api-operator/pkg/apis/machine github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1 github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider -github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1alpha1 +github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1 # github.com/openshift/machine-config-operator v4.2.0-alpha.0.0.20190917115525-033375cbe820+incompatible => github.com/openshift/machine-config-operator v0.0.1-0.20200130220348-e5685c0cf530 github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1 # github.com/ovirt/go-ovirt v4.3.4+incompatible @@ -1384,7 +1382,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/zapcore # go4.org v0.0.0-20191010144846-132d2879e1e9 go4.org/errorutil -# golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 +# golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -1405,12 +1403,13 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/poly1305 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent 
+golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts golang.org/x/crypto/ssh/terminal # golang.org/x/exp v0.0.0-20191129062945-2f5052295587 golang.org/x/exp/apidiff golang.org/x/exp/cmd/apidiff -# golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f +# golang.org/x/lint v0.0.0-20200302205851-738671d3881b golang.org/x/lint golang.org/x/lint/golint # golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee @@ -1642,7 +1641,7 @@ honnef.co/go/tools/staticcheck/vrp honnef.co/go/tools/stylecheck honnef.co/go/tools/unused honnef.co/go/tools/version -# k8s.io/api v0.17.2 => k8s.io/api v0.17.1 +# k8s.io/api v0.18.0 => k8s.io/api v0.17.1 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 @@ -1683,7 +1682,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.17.3 => k8s.io/apimachinery v0.17.1 +# k8s.io/apimachinery v0.18.0 => k8s.io/apimachinery v0.17.1 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1798,7 +1797,7 @@ k8s.io/client-go/util/retry k8s.io/klog # k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a k8s.io/kube-openapi/pkg/common -# k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc +# k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 k8s.io/utils/buffer k8s.io/utils/integer k8s.io/utils/pointer @@ -1810,10 +1809,10 @@ sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1beta1 sigs.k8s.io/cluster-api-provider-azure/pkg/apis sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1alpha1 sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1 -# sigs.k8s.io/cluster-api-provider-openstack v0.0.0 => github.com/openshift/cluster-api-provider-openstack v0.0.0-20200221124403-d699c3611b0c +# sigs.k8s.io/cluster-api-provider-openstack v0.0.0 => github.com/openshift/cluster-api-provider-openstack v0.0.0-20200323110431-3311de91e078 
sigs.k8s.io/cluster-api-provider-openstack/pkg/apis sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1 # sigs.k8s.io/controller-runtime v0.4.0 sigs.k8s.io/controller-runtime/pkg/scheme -# sigs.k8s.io/yaml v1.1.0 +# sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go index da8edf55fef..3c6385ad605 100644 --- a/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go @@ -121,6 +121,8 @@ type NetworkParam struct { Filter Filter `json:"filter,omitempty"` // Subnet within a network to use Subnets []SubnetParam `json:"subnets,omitempty"` + // NoAllowedAddressPairs disables creation of allowed address pairs for the network ports + NoAllowedAddressPairs bool `json:"noAllowedAddressPairs,omitempty"` } type Filter struct { diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml index 03ddc7318ae..d20e23eff43 100644 --- a/vendor/sigs.k8s.io/yaml/.travis.yml +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -1,14 +1,13 @@ language: go dist: xenial go: - - 1.9.x - - 1.10.x - - 1.11.x + - 1.12.x + - 1.13.x script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(gofmt -d *.go) - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) - - go tool vet . - - go test -v -race ./... + - GO111MODULE=on go vet . + - GO111MODULE=on go test -v -race ./... 
+ - git diff --exit-code install: - - go get golang.org/x/lint/golint + - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 11ad7ce1a40..325b40b0763 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - dims - lavalamp diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md index 0200f75b4d1..5a651d91633 100644 --- a/vendor/sigs.k8s.io/yaml/README.md +++ b/vendor/sigs.k8s.io/yaml/README.md @@ -1,12 +1,14 @@ # YAML marshaling and unmarshaling support for Go -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) +[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml) + +kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml). ## Introduction A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. 
For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). ## Compatibility @@ -32,13 +34,13 @@ GOOD: To install, run: ``` -$ go get github.com/ghodss/yaml +$ go get sigs.k8s.io/yaml ``` And import using: ``` -import "github.com/ghodss/yaml" +import "sigs.k8s.io/yaml" ``` Usage is very similar to the JSON library: @@ -49,7 +51,7 @@ package main import ( "fmt" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) type Person struct { @@ -93,7 +95,7 @@ package main import ( "fmt" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) func main() { diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod new file mode 100644 index 00000000000..7224f34971c --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/go.mod @@ -0,0 +1,8 @@ +module sigs.k8s.io/yaml + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + gopkg.in/yaml.v2 v2.2.8 +) diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum new file mode 100644 index 00000000000..76e49483af4 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go 
index 024596112ac..efbc535d416 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -317,3 +317,64 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in return yamlObj, nil } } + +// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice, +// without going through a byte representation. A nil or empty map[string]interface{} input is +// converted to an empty map, i.e. yaml.MapSlice(nil). +// +// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice. +// +// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml: +// - float64s are down-casted as far as possible without data-loss to int, int64, uint64. +// - int64s are down-casted to int if possible without data-loss. +// +// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case. +// +// string, bool and any other types are unchanged. +func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice { + if len(j) == 0 { + return nil + } + ret := make(yaml.MapSlice, 0, len(j)) + for k, v := range j { + ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)}) + } + return ret +} + +func jsonToYAMLValue(j interface{}) interface{} { + switch j := j.(type) { + case map[string]interface{}: + if j == nil { + return interface{}(nil) + } + return JSONObjectToYAMLObject(j) + case []interface{}: + if j == nil { + return interface{}(nil) + } + ret := make([]interface{}, len(j)) + for i := range j { + ret[i] = jsonToYAMLValue(j[i]) + } + return ret + case float64: + // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 + if i64 := int64(j); j == float64(i64) { + if i := int(i64); i64 == int64(i) { + return i + } + return i64 + } + if ui64 := uint64(j); j == float64(ui64) { + return ui64 + } + return j + case int64: + if i := int(j); j == int64(i) { + return i + } + 
return j + } + return j +}