diff --git a/pkg/asset/cluster/cluster.go b/pkg/asset/cluster/cluster.go index af8723b30ee..f3b697e944f 100644 --- a/pkg/asset/cluster/cluster.go +++ b/pkg/asset/cluster/cluster.go @@ -5,30 +5,22 @@ import ( "fmt" "path/filepath" "strings" - "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/wait" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - utilkubeconfig "sigs.k8s.io/cluster-api/util/kubeconfig" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/cluster/aws" "github.com/openshift/installer/pkg/asset/cluster/azure" "github.com/openshift/installer/pkg/asset/cluster/openstack" + "github.com/openshift/installer/pkg/asset/cluster/tfvars" + "github.com/openshift/installer/pkg/asset/ignition/bootstrap" + "github.com/openshift/installer/pkg/asset/ignition/machine" "github.com/openshift/installer/pkg/asset/installconfig" - awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" "github.com/openshift/installer/pkg/asset/kubeconfig" - "github.com/openshift/installer/pkg/asset/manifests/capiutils" capimanifests "github.com/openshift/installer/pkg/asset/manifests/clusterapi" "github.com/openshift/installer/pkg/asset/password" "github.com/openshift/installer/pkg/asset/quota" - "github.com/openshift/installer/pkg/clusterapi" infra "github.com/openshift/installer/pkg/infrastructure/platform" typesaws "github.com/openshift/installer/pkg/types/aws" typesazure "github.com/openshift/installer/pkg/types/azure" @@ -67,10 +59,12 @@ func (c *Cluster) Dependencies() []asset.Asset { &installconfig.PlatformPermsCheck{}, &installconfig.PlatformProvisionCheck{}, "a.PlatformQuotaCheck{}, - &TerraformVariables{}, + &tfvars.TerraformVariables{}, &password.KubeadminPassword{}, &capimanifests.Cluster{}, &kubeconfig.AdminClient{}, + 
&bootstrap.Bootstrap{}, + &machine.Master{}, } } @@ -82,7 +76,7 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) { clusterID := &installconfig.ClusterID{} installConfig := &installconfig.InstallConfig{} - terraformVariables := &TerraformVariables{} + terraformVariables := &tfvars.TerraformVariables{} parents.Get(clusterID, installConfig, terraformVariables) if fs := installConfig.Config.FeatureSet; strings.HasSuffix(string(fs), "NoUpgrade") { @@ -97,31 +91,14 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) { return errors.New("cluster cannot be created with bootstrapInPlace set") } - // Check if we're using Cluster API. - if capiutils.IsEnabled(installConfig) { - // TODO(vincepri): The context should be passed down from the caller, - // although today the Asset interface doesn't allow it, refactor once it does. - ctx, cancel := context.WithCancel(signals.SetupSignalHandler()) - go func() { - <-ctx.Done() - cancel() - clusterapi.System().Teardown() - }() - - return c.provisionWithClusterAPI(ctx, parents, installConfig, clusterID) - } - - // Otherwise, use the normal path. - return c.provision(installConfig, clusterID, terraformVariables) -} - -func (c *Cluster) provision(installConfig *installconfig.InstallConfig, clusterID *installconfig.ClusterID, terraformVariables *TerraformVariables) error { platform := installConfig.Config.Platform.Name() if azure := installConfig.Config.Platform.Azure; azure != nil && azure.CloudName == typesazure.StackCloud { platform = typesazure.StackTerraformName } + // TODO(padillon): determine whether CAPI handles tagging shared subnets, in which case we should be able + // to encapsulate these into the terraform package. 
logrus.Infof("Creating infrastructure resources...") switch platform { case typesaws.Name: @@ -138,16 +115,11 @@ func (c *Cluster) provision(installConfig *installconfig.InstallConfig, clusterI } } - tfvarsFiles := []*asset.File{} - for _, file := range terraformVariables.Files() { - tfvarsFiles = append(tfvarsFiles, file) - } - provider, err := infra.ProviderForPlatform(platform, installConfig.Config.EnabledFeatureGates()) if err != nil { return fmt.Errorf("error getting infrastructure provider: %w", err) } - files, err := provider.Provision(InstallDir, tfvarsFiles) + files, err := provider.Provision(InstallDir, parents) if files != nil { c.FileList = append(c.FileList, files...) // append state files even in case of failure } @@ -158,137 +130,6 @@ func (c *Cluster) provision(installConfig *installconfig.InstallConfig, clusterI return nil } -func (c *Cluster) provisionWithClusterAPI(ctx context.Context, parents asset.Parents, installConfig *installconfig.InstallConfig, clusterID *installconfig.ClusterID) error { - capiManifests := &capimanifests.Cluster{} - clusterKubeconfigAsset := &kubeconfig.AdminClient{} - parents.Get( - capiManifests, - clusterKubeconfigAsset, - ) - - // Only need the objects--not the files. - manifests := []client.Object{} - for _, m := range capiManifests.RuntimeFiles() { - manifests = append(manifests, m.Object) - } - - // Run the CAPI system. - capiSystem := clusterapi.System() - if err := capiSystem.Run(ctx, installConfig); err != nil { - return fmt.Errorf("failed to run cluster api system: %w", err) - } - - // Grab the client. - cl := capiSystem.Client() - - // Create all the manifests and store them. 
- for _, m := range manifests { - m.SetNamespace(capiutils.Namespace) - if err := cl.Create(context.Background(), m); err != nil { - return fmt.Errorf("failed to create manifest: %w", err) - } - logrus.Infof("Created manifest %+T, namespace=%s name=%s", m, m.GetNamespace(), m.GetName()) - } - - // Pass cluster kubeconfig and store it in; this is usually the role of a bootstrap provider. - { - key := client.ObjectKey{ - Name: clusterID.InfraID, - Namespace: capiutils.Namespace, - } - cluster := &clusterv1.Cluster{} - if err := cl.Get(context.Background(), key, cluster); err != nil { - return err - } - // Create the secret. - clusterKubeconfig := clusterKubeconfigAsset.Files()[0].Data - secret := utilkubeconfig.GenerateSecret(cluster, clusterKubeconfig) - if err := cl.Create(context.Background(), secret); err != nil { - return err - } - } - - // Wait for the load balancer to be ready by checking the control plane endpoint - // on the cluster object. - var cluster *clusterv1.Cluster - { - if err := wait.ExponentialBackoff(wait.Backoff{ - Duration: time.Second * 10, - Factor: float64(1.5), - Steps: 32, - }, func() (bool, error) { - c := &clusterv1.Cluster{} - if err := cl.Get(context.Background(), client.ObjectKey{ - Name: clusterID.InfraID, - Namespace: capiutils.Namespace, - }, c); err != nil { - if apierrors.IsNotFound(err) { - return false, nil - } - return false, err - } - cluster = c - return cluster.Spec.ControlPlaneEndpoint.IsValid(), nil - }); err != nil { - return err - } - if cluster == nil { - return errors.New("error occurred during load balancer ready check") - } - if cluster.Spec.ControlPlaneEndpoint.Host == "" { - return errors.New("control plane endpoint is not set") - } - } - - // Run the post-provisioning steps for the platform we're on. - // TODO(vincepri): The following should probably be in a separate package with a clear - // interface and multiple hooks at different stages of the cluster lifecycle. 
- switch installConfig.Config.Platform.Name() { - case typesaws.Name: - ssn, err := installConfig.AWS.Session(context.TODO()) - if err != nil { - return fmt.Errorf("failed to create session: %w", err) - } - client := awsconfig.NewClient(ssn) - r53cfg := awsconfig.GetR53ClientCfg(ssn, "") - err = client.CreateOrUpdateRecord(installConfig.Config, cluster.Spec.ControlPlaneEndpoint.Host, r53cfg) - if err != nil { - return fmt.Errorf("failed to create route53 records: %w", err) - } - logrus.Infof("Created Route53 records to control plane load balancer.") - default: - } - - // For each manifest we created, retrieve it and store it in the asset. - for _, m := range manifests { - key := client.ObjectKey{ - Name: m.GetName(), - Namespace: m.GetNamespace(), - } - if err := cl.Get(context.Background(), key, m); err != nil { - return fmt.Errorf("failed to get manifest: %w", err) - } - - gvk, err := cl.GroupVersionKindFor(m) - if err != nil { - return fmt.Errorf("failed to get GVK for manifest: %w", err) - } - fileName := fmt.Sprintf("%s-%s-%s.yaml", gvk.Kind, m.GetNamespace(), m.GetName()) - objData, err := yaml.Marshal(m) - if err != nil { - errMsg := fmt.Sprintf("failed to create infrastructure manifest %s from InstallConfig", fileName) - return errors.Wrapf(err, errMsg) - } - c.FileList = append(c.FileList, &asset.File{ - Filename: fileName, - Data: objData, - }) - } - - logrus.Infof("Cluster API resources have been created. Waiting for cluster to become ready...") - return nil -} - // Files returns the FileList generated by the asset. 
func (c *Cluster) Files() []*asset.File { return c.FileList diff --git a/pkg/asset/cluster/metadata.go b/pkg/asset/cluster/metadata.go index a029e10228d..add8843083d 100644 --- a/pkg/asset/cluster/metadata.go +++ b/pkg/asset/cluster/metadata.go @@ -2,8 +2,6 @@ package cluster import ( "encoding/json" - "os" - "path/filepath" "github.com/pkg/errors" @@ -16,6 +14,7 @@ import ( "github.com/openshift/installer/pkg/asset/cluster/gcp" "github.com/openshift/installer/pkg/asset/cluster/ibmcloud" "github.com/openshift/installer/pkg/asset/cluster/libvirt" + clustermetadata "github.com/openshift/installer/pkg/asset/cluster/metadata" "github.com/openshift/installer/pkg/asset/cluster/nutanix" "github.com/openshift/installer/pkg/asset/cluster/openstack" "github.com/openshift/installer/pkg/asset/cluster/ovirt" @@ -41,10 +40,6 @@ import ( vspheretypes "github.com/openshift/installer/pkg/types/vsphere" ) -const ( - metadataFileName = "metadata.json" -) - // Metadata contains information needed to destroy clusters. type Metadata struct { File *asset.File @@ -123,7 +118,7 @@ func (m *Metadata) Generate(parents asset.Parents) (err error) { } m.File = &asset.File{ - Filename: metadataFileName, + Filename: clustermetadata.FileName, Data: data, } @@ -143,19 +138,3 @@ func (m *Metadata) Files() []*asset.File { func (m *Metadata) Load(f asset.FileFetcher) (found bool, err error) { return false, nil } - -// LoadMetadata loads the cluster metadata from an asset directory. 
-func LoadMetadata(dir string) (*types.ClusterMetadata, error) { - path := filepath.Join(dir, metadataFileName) - raw, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - var metadata *types.ClusterMetadata - if err = json.Unmarshal(raw, &metadata); err != nil { - return nil, errors.Wrapf(err, "failed to Unmarshal data from %q to types.ClusterMetadata", path) - } - - return metadata, err -} diff --git a/pkg/asset/cluster/metadata/metadata.go b/pkg/asset/cluster/metadata/metadata.go new file mode 100644 index 00000000000..9160036f906 --- /dev/null +++ b/pkg/asset/cluster/metadata/metadata.go @@ -0,0 +1,30 @@ +package metadata + +import ( + "encoding/json" + "os" + "path/filepath" + + "github.com/openshift/installer/pkg/types" + "github.com/pkg/errors" +) + +const ( + FileName = "metadata.json" +) + +// LoadMetadata loads the cluster metadata from an asset directory. +func Load(dir string) (*types.ClusterMetadata, error) { + path := filepath.Join(dir, FileName) + raw, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var metadata *types.ClusterMetadata + if err = json.Unmarshal(raw, &metadata); err != nil { + return nil, errors.Wrapf(err, "failed to Unmarshal data from %q to types.ClusterMetadata", path) + } + + return metadata, err +} diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars/tfvars.go similarity index 99% rename from pkg/asset/cluster/tfvars.go rename to pkg/asset/cluster/tfvars/tfvars.go index 35ead2ec569..fb4ddb317e4 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars/tfvars.go @@ -1,4 +1,4 @@ -package cluster +package tfvars import ( "context" diff --git a/pkg/asset/installconfig/aws/route53.go b/pkg/asset/installconfig/aws/route53.go index 0317e9748be..848b728d73f 100644 --- a/pkg/asset/installconfig/aws/route53.go +++ b/pkg/asset/installconfig/aws/route53.go @@ -148,35 +148,70 @@ func GetR53ClientCfg(sess *awss.Session, roleARN string) *aws.Config { } // CreateOrUpdateRecord 
Creates or Updates the Route53 Record for the cluster endpoint. -func (c *Client) CreateOrUpdateRecord(ic *types.InstallConfig, target string, cfg *aws.Config) error { +func (c *Client) CreateOrUpdateRecord(ic *types.InstallConfig, cfg *aws.Config, target, intTarget, phzID string) error { zone, err := c.GetBaseDomain(ic.BaseDomain) if err != nil { return err } - params := &route53.ChangeResourceRecordSetsInput{ + svc := route53.New(c.ssn, cfg) + + apiParams := &route53.ChangeResourceRecordSetsInput{ ChangeBatch: &route53.ChangeBatch{ Comment: aws.String(fmt.Sprintf("Creating record for api and api-int in domain %s", ic.ClusterDomain())), }, HostedZoneId: zone.Id, } - for _, prefix := range []string{"api", "api-int"} { - params.ChangeBatch.Changes = append(params.ChangeBatch.Changes, &route53.Change{ - Action: aws.String("UPSERT"), - ResourceRecordSet: &route53.ResourceRecordSet{ - Name: aws.String(fmt.Sprintf("%s.%s.", prefix, ic.ClusterDomain())), - Type: aws.String("A"), - AliasTarget: &route53.AliasTarget{ - DNSName: aws.String(target), - HostedZoneId: aws.String(hostedZoneIDPerRegionNLBMap[ic.AWS.Region]), - EvaluateTargetHealth: aws.Bool(true), - }, + + apiParams.ChangeBatch.Changes = append(apiParams.ChangeBatch.Changes, &route53.Change{ + Action: aws.String("UPSERT"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: aws.String(fmt.Sprintf("%s.%s.", "api", ic.ClusterDomain())), + Type: aws.String("A"), + AliasTarget: &route53.AliasTarget{ + DNSName: aws.String(target), + HostedZoneId: aws.String(hostedZoneIDPerRegionNLBMap[ic.AWS.Region]), + EvaluateTargetHealth: aws.Bool(false), }, - }) + }, + }) + + if _, err := svc.ChangeResourceRecordSets(apiParams); err != nil { + return fmt.Errorf("failed to create records for api: %w", err) } - svc := route53.New(c.ssn, cfg) - if _, err := svc.ChangeResourceRecordSets(params); err != nil { - return fmt.Errorf("failed to create records for api/api-int: %w", err) + + apiIntParams := 
&route53.ChangeResourceRecordSetsInput{ + ChangeBatch: &route53.ChangeBatch{ + Comment: aws.String(fmt.Sprintf("Creating record for api and api-int in domain %s", ic.ClusterDomain())), + }, + HostedZoneId: aws.String(phzID), + } + apiIntParams.ChangeBatch.Changes = append(apiIntParams.ChangeBatch.Changes, &route53.Change{ + Action: aws.String("UPSERT"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: aws.String(fmt.Sprintf("%s.%s.", "api", ic.ClusterDomain())), + Type: aws.String("A"), + AliasTarget: &route53.AliasTarget{ + DNSName: aws.String(intTarget), + HostedZoneId: aws.String(hostedZoneIDPerRegionNLBMap[ic.AWS.Region]), + EvaluateTargetHealth: aws.Bool(false), + }, + }, + }) + apiIntParams.ChangeBatch.Changes = append(apiIntParams.ChangeBatch.Changes, &route53.Change{ + Action: aws.String("UPSERT"), + ResourceRecordSet: &route53.ResourceRecordSet{ + Name: aws.String(fmt.Sprintf("%s.%s.", "api-int", ic.ClusterDomain())), + Type: aws.String("A"), + AliasTarget: &route53.AliasTarget{ + DNSName: aws.String(intTarget), + HostedZoneId: aws.String(hostedZoneIDPerRegionNLBMap[ic.AWS.Region]), + EvaluateTargetHealth: aws.Bool(false), + }, + }, + }) + if _, err := svc.ChangeResourceRecordSets(apiIntParams); err != nil { + return fmt.Errorf("failed to create records for api-int: %w", err) } return nil } diff --git a/pkg/asset/machines/aws/machines.go b/pkg/asset/machines/aws/machines.go index 3a6cc49d67c..facaebe73e8 100644 --- a/pkg/asset/machines/aws/machines.go +++ b/pkg/asset/machines/aws/machines.go @@ -111,10 +111,15 @@ func Machines(clusterID string, region string, subnets map[string]string, pool * } if subnet == "" { domain.Subnet.Type = machinev1.AWSFiltersReferenceType - domain.Subnet.Filters = &[]machinev1.AWSResourceFilter{{ - Name: "tag:Name", - Values: []string{fmt.Sprintf("%s-private-%s", clusterID, zone)}, - }} + domain.Subnet.Filters = &[]machinev1.AWSResourceFilter{ + { + Name: "tag:Name", + Values: []string{ + fmt.Sprintf("%s-private-%s", 
clusterID, zone), // legacy Terraform config, TODO remove + fmt.Sprintf("%s-subnet-private-%s", clusterID, zone), + }, + }, + } } else { domain.Subnet.Type = machinev1.AWSIDReferenceType domain.Subnet.ID = pointer.String(subnet) @@ -178,12 +183,34 @@ func provider(in *machineProviderInput) (*machineapi.AWSMachineProviderConfig, e return nil, errors.Wrap(err, "failed to create machineapi.TagSpecifications from UserTags") } - securityGroups := []machineapi.AWSResourceReference{{ - Filters: []machineapi.Filter{{ + sgFilters := []machineapi.Filter{ + { Name: "tag:Name", - Values: []string{fmt.Sprintf("%s-%s-sg", in.clusterID, in.role)}, - }}, - }} + Values: []string{fmt.Sprintf("%s-%s-sg", in.clusterID, in.role)}, // legacy TF config, TODO remove + }, + { + Name: "tag:Name", + Values: []string{fmt.Sprintf("%s-node", in.clusterID)}, + }, + { + Name: "tag:Name", + Values: []string{fmt.Sprintf("%s-lb", in.clusterID)}, + }, + } + + if in.role == "master" { + cpFilter := machineapi.Filter{ + Name: "tag:Name", + Values: []string{fmt.Sprintf("%s-controlplane", in.clusterID)}} + sgFilters = append(sgFilters, cpFilter) + } + + securityGroups := []machineapi.AWSResourceReference{} + for _, filter := range sgFilters { + securityGroups = append(securityGroups, machineapi.AWSResourceReference{ + Filters: []machineapi.Filter{filter}, + }) + } securityGroupsIn := []machineapi.AWSResourceReference{} for _, sgID := range in.securityGroupIDs { sgID := sgID @@ -223,17 +250,24 @@ func provider(in *machineProviderInput) (*machineapi.AWSMachineProviderConfig, e SecurityGroups: securityGroups, } - subnetName := fmt.Sprintf("%s-private-%s", in.clusterID, in.zone) + visibility := "private" if in.publicSubnet { config.PublicIP = pointer.Bool(in.publicSubnet) - subnetName = fmt.Sprintf("%s-public-%s", in.clusterID, in.zone) + visibility = "public" + } + + subnetFilters := []machineapi.Filter{ + { + Name: "tag:Name", + Values: []string{ + fmt.Sprintf("%s-%s-%s", in.clusterID, visibility, 
in.zone), + fmt.Sprintf("%s-subnet-%s-%s", in.clusterID, visibility, in.zone), // legacy TF config, TODO remove + }, + }, } if in.subnet == "" { - config.Subnet.Filters = []machineapi.Filter{{ - Name: "tag:Name", - Values: []string{subnetName}, - }} + config.Subnet.Filters = subnetFilters } else { config.Subnet.ID = pointer.String(in.subnet) } diff --git a/pkg/asset/manifests/aws/cluster.go b/pkg/asset/manifests/aws/cluster.go index 086358ae708..79b12d4864f 100644 --- a/pkg/asset/manifests/aws/cluster.go +++ b/pkg/asset/manifests/aws/cluster.go @@ -137,6 +137,13 @@ func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID ToPort: 10259, SourceSecurityGroupRoles: []capa.SecurityGroupRole{"controlplane", "node"}, }, + { + Description: "public api", //TODO(padillon): this should only be on public/external clusters + Protocol: capa.SecurityGroupProtocolTCP, + FromPort: 6443, + ToPort: 6443, + CidrBlocks: []string{"0.0.0.0/0"}, + }, { Description: "SSH everyone", Protocol: capa.SecurityGroupProtocolTCP, @@ -151,9 +158,9 @@ func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID PresignedURLDuration: &metav1.Duration{Duration: 1 * time.Hour}, }, ControlPlaneLoadBalancer: &capa.AWSLoadBalancerSpec{ - Name: ptr.To(clusterID.InfraID + "-ext"), + Name: ptr.To(clusterID.InfraID + "-int"), LoadBalancerType: capa.LoadBalancerTypeNLB, - Scheme: &capa.ELBSchemeInternetFacing, + Scheme: &capa.ELBSchemeInternal, AdditionalListeners: []capa.AdditionalListenerSpec{ { Port: 22623, @@ -161,6 +168,7 @@ func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID }, }, }, + AdditionalTags: capa.Tags{fmt.Sprintf("kubernetes.io/cluster/%s", clusterID.InfraID): "owned"}, }, } diff --git a/pkg/asset/manifests/clusterapi/cluster.go b/pkg/asset/manifests/clusterapi/cluster.go index ebcd119f835..c5afc963049 100644 --- a/pkg/asset/manifests/clusterapi/cluster.go +++ b/pkg/asset/manifests/clusterapi/cluster.go @@ -15,8 +15,6 @@ 
import ( "sigs.k8s.io/yaml" "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/ignition/bootstrap" - "github.com/openshift/installer/pkg/asset/ignition/machine" "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/machines" "github.com/openshift/installer/pkg/asset/manifests" @@ -53,8 +51,6 @@ func (c *Cluster) Dependencies() []asset.Asset { &installconfig.ClusterID{}, &openshiftinstall.Config{}, &manifests.FeatureGate{}, - &bootstrap.Bootstrap{}, - &machine.Master{}, new(rhcos.Image), } } @@ -65,10 +61,8 @@ func (c *Cluster) Generate(dependencies asset.Parents) error { clusterID := &installconfig.ClusterID{} openshiftInstall := &openshiftinstall.Config{} featureGate := &manifests.FeatureGate{} - bootstrapIgnAsset := &bootstrap.Bootstrap{} - masterIgnAsset := &machine.Master{} rhcosImage := new(rhcos.Image) - dependencies.Get(installConfig, clusterID, openshiftInstall, bootstrapIgnAsset, masterIgnAsset, featureGate, rhcosImage) + dependencies.Get(installConfig, clusterID, openshiftInstall, featureGate, rhcosImage) // If the feature gate is not enabled, do not generate any manifests. if !capiutils.IsEnabled(installConfig) { @@ -97,49 +91,6 @@ func (c *Cluster) Generate(dependencies asset.Parents) error { } c.FileList = append(c.FileList, &asset.RuntimeFile{Object: cluster, File: asset.File{Filename: "01_capi-cluster.yaml"}}) - // Gather the ignition files, and store them in a secret. 
- { - masterIgn := string(masterIgnAsset.Files()[0].Data) - bootstrapIgn, err := injectInstallInfo(bootstrapIgnAsset.Files()[0].Data) - if err != nil { - return errors.Wrap(err, "unable to inject installation info") - } - c.FileList = append(c.FileList, - &asset.RuntimeFile{ - File: asset.File{Filename: "01_ignition-secret-master.yaml"}, - Object: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", clusterID.InfraID, "master"), - Namespace: capiutils.Namespace, - Labels: map[string]string{ - "cluster.x-k8s.io/cluster-name": clusterID.InfraID, - }, - }, - Data: map[string][]byte{ - "format": []byte("ignition"), - "value": []byte(masterIgn), - }, - }, - }, - &asset.RuntimeFile{ - File: asset.File{Filename: "01_ignition-secret-bootstrap.yaml"}, - Object: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", clusterID.InfraID, "bootstrap"), - Namespace: capiutils.Namespace, - Labels: map[string]string{ - "cluster.x-k8s.io/cluster-name": clusterID.InfraID, - }, - }, - Data: map[string][]byte{ - "format": []byte("ignition"), - "value": []byte(bootstrapIgn), - }, - }, - }, - ) - } - var out *capiutils.GenerateClusterAssetsOutput switch platform := installConfig.Config.Platform.Name(); platform { case "aws": diff --git a/pkg/asset/targets/targets.go b/pkg/asset/targets/targets.go index a4e9e64c588..960d58eb1e6 100644 --- a/pkg/asset/targets/targets.go +++ b/pkg/asset/targets/targets.go @@ -3,6 +3,7 @@ package targets import ( "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/cluster" + "github.com/openshift/installer/pkg/asset/cluster/tfvars" "github.com/openshift/installer/pkg/asset/ignition/bootstrap" "github.com/openshift/installer/pkg/asset/ignition/machine" "github.com/openshift/installer/pkg/asset/installconfig" @@ -66,7 +67,7 @@ var ( &cluster.Metadata{}, &machine.MasterIgnitionCustomizations{}, &machine.WorkerIgnitionCustomizations{}, - &cluster.TerraformVariables{}, + 
&tfvars.TerraformVariables{}, &kubeconfig.AdminClient{}, &password.KubeadminPassword{}, &tls.JournalCertKey{}, diff --git a/pkg/destroy/bootstrap/bootstrap.go b/pkg/destroy/bootstrap/bootstrap.go index ef94938f2f0..841d7d10d6d 100644 --- a/pkg/destroy/bootstrap/bootstrap.go +++ b/pkg/destroy/bootstrap/bootstrap.go @@ -14,7 +14,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" configv1 "github.com/openshift/api/config/v1" - "github.com/openshift/installer/pkg/asset/cluster" + "github.com/openshift/installer/pkg/asset/cluster/metadata" openstackasset "github.com/openshift/installer/pkg/asset/cluster/openstack" "github.com/openshift/installer/pkg/asset/manifests/capiutils" "github.com/openshift/installer/pkg/clusterapi" @@ -30,7 +30,7 @@ import ( // Destroy uses Terraform to remove bootstrap resources. func Destroy(ctx context.Context, dir string) (err error) { - metadata, err := cluster.LoadMetadata(dir) + metadata, err := metadata.Load(dir) if err != nil { return err } diff --git a/pkg/destroy/destroyer.go b/pkg/destroy/destroyer.go index 88b7e60f6c9..9086f05c7cb 100644 --- a/pkg/destroy/destroyer.go +++ b/pkg/destroy/destroyer.go @@ -4,13 +4,13 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/openshift/installer/pkg/asset/cluster" + "github.com/openshift/installer/pkg/asset/cluster/metadata" "github.com/openshift/installer/pkg/destroy/providers" ) // New returns a Destroyer based on `metadata.json` in `rootDir`. 
func New(logger logrus.FieldLogger, rootDir string) (providers.Destroyer, error) { - metadata, err := cluster.LoadMetadata(rootDir) + metadata, err := metadata.Load(rootDir) if err != nil { return nil, err } diff --git a/pkg/gather/gather.go b/pkg/gather/gather.go index d4a6000c46f..0cde260a480 100644 --- a/pkg/gather/gather.go +++ b/pkg/gather/gather.go @@ -11,13 +11,13 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/openshift/installer/pkg/asset/cluster" + "github.com/openshift/installer/pkg/asset/cluster/metadata" "github.com/openshift/installer/pkg/gather/providers" ) // New returns a Gather based on `metadata.json` in `rootDir`. func New(logger logrus.FieldLogger, serialLogBundle string, bootstrap string, masters []string, rootDir string) (providers.Gather, error) { - metadata, err := cluster.LoadMetadata(rootDir) + metadata, err := metadata.Load(rootDir) if err != nil { return nil, err } diff --git a/pkg/infrastructure/aws/aws.go b/pkg/infrastructure/aws/aws.go index 6d141814d2a..6d8eb885e7a 100644 --- a/pkg/infrastructure/aws/aws.go +++ b/pkg/infrastructure/aws/aws.go @@ -20,6 +20,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "github.com/openshift/installer/pkg/asset" + tfvarsAsset "github.com/openshift/installer/pkg/asset/cluster/tfvars" awssession "github.com/openshift/installer/pkg/asset/installconfig/aws" "github.com/openshift/installer/pkg/infrastructure" "github.com/openshift/installer/pkg/tfvars" @@ -58,17 +59,16 @@ type output struct { PrivateSubnetIDs []string `json:"private_subnet_ids,omitempty"` } -// Provision creates the infrastructure resources for the stage. -// dir: the path of the install dir -// vars: cluster configuration input variables, such as terraform variables files -// returns a slice of File assets, which will be appended to the cluster asset file list. 
-func (a InfraProvider) Provision(dir string, vars []*asset.File) ([]*asset.File, error) { +// Provision creates cluster infrastructure using AWS SDK calls. +func (a InfraProvider) Provision(dir string, parents asset.Parents) ([]*asset.File, error) { + terraformVariables := &tfvarsAsset.TerraformVariables{} + parents.Get(terraformVariables) // Unmarshall input from tf variables, so we can use it along with // installConfig and other assets as the contractual input regardless of // the implementation. clusterConfig := &tfvars.Config{} clusterAWSConfig := &awstfvars.Config{} - for _, file := range vars { + for _, file := range terraformVariables.Files() { switch file.Filename { case tfVarsFileName: if err := json.Unmarshal(file.Data, clusterConfig); err != nil { diff --git a/pkg/infrastructure/aws/clusterapi/clusterapi.go b/pkg/infrastructure/aws/clusterapi/clusterapi.go new file mode 100644 index 00000000000..31ff0b77d9d --- /dev/null +++ b/pkg/infrastructure/aws/clusterapi/clusterapi.go @@ -0,0 +1,155 @@ +package clusterapi + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/pkg/errors" + capa "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openshift/installer/pkg/asset/manifests/capiutils" + "github.com/openshift/installer/pkg/infrastructure/clusterapi" +) + +type Provider struct { + clusterapi.DefaultCAPIProvider +} + +func (p Provider) PreProvision(in clusterapi.PreProvisionInput) error { + // TODO(padillon): skip if users bring their own roles + if err := putIAMRoles(in.ClusterID, in.InstallConfig); err != nil { + return fmt.Errorf("failed to create IAM roles: %w", err) + } + return nil +} + +func (p Provider) ControlPlaneAvailable(in clusterapi.ControlPlaneAvailableInput) error { + awsCluster := &capa.AWSCluster{} + key := 
client.ObjectKey{ + Name: in.InfraID, + Namespace: capiutils.Namespace, + } + if err := in.Client.Get(context.Background(), key, awsCluster); err != nil { + return fmt.Errorf("failed to get AWSCluster: %w", err) + } + awsSession, err := in.InstallConfig.AWS.Session(context.TODO()) + if err != nil { + return fmt.Errorf("failed to get session to create load balancer: %w", err) + } + subnetIDs := []string{} + for _, s := range awsCluster.Spec.NetworkSpec.Subnets { + if s.IsPublic { + subnetIDs = append(subnetIDs, s.ResourceID) + } + } + + var vpcID string + var lastError error + ec2Client := ec2.New(awsSession, aws.NewConfig().WithRegion(awsCluster.Spec.Region)) + err = ec2Client.DescribeSubnetsPagesWithContext( + context.TODO(), + &ec2.DescribeSubnetsInput{SubnetIds: []*string{aws.String(subnetIDs[0])}}, //TODO ensure no segfault + func(results *ec2.DescribeSubnetsOutput, lastPage bool) bool { + for _, subnet := range results.Subnets { + if subnet.SubnetId == nil { + continue + } + if subnet.SubnetArn == nil { + lastError = errors.Errorf("%s has no ARN", *subnet.SubnetId) + return false + } + if subnet.VpcId == nil { + lastError = errors.Errorf("%s has no VPC", *subnet.SubnetId) + return false + } + if subnet.AvailabilityZone == nil { + lastError = errors.Errorf("%s has not availability zone", *subnet.SubnetId) + return false + } + vpcID = *subnet.VpcId + } + return !lastPage + }, + ) + if err == nil { + err = lastError + } + if err != nil { + return fmt.Errorf("error getting VPC ID: %w", err) + } + + tags := map[string]string{ + fmt.Sprintf("kubernetes.io/cluster/%s", in.InfraID): "owned", + } + for k, v := range awsCluster.Spec.AdditionalTags { + tags[k] = v + } + + elbClient := elbv2.New(awsSession) + + //TODO(padillon): support shared vpc (assume role client) + r53Client := route53.New(awsSession) + phz, err := createHostedZone(context.TODO(), r53Client, tags, in.InfraID, in.InstallConfig.Config.ClusterDomain(), vpcID, awsCluster.Spec.Region, true) + if err != 
nil { + return fmt.Errorf("failed to create private hosted zone: %w", err) + } + + lb, aextTG, err := createExtLB(elbClient, subnetIDs, tags, in.InfraID, vpcID) + if err != nil { + return fmt.Errorf("error creating external LB: %w", err) + } + + ids, err := getControlPlaneIDs(in.Client, in.InstallConfig.Config.ControlPlane.Replicas, in.InfraID) + if err != nil { + return fmt.Errorf("error getting control plane IP addresses") + } + + err = registerControlPlane(elbClient, ids, aextTG) + if err != nil { + return fmt.Errorf("error registering control plane to api-int target group: %w", err) + } + + if err := createDNSRecords(in.InstallConfig, *lb.DNSName, in.Cluster.Spec.ControlPlaneEndpoint.Host, *phz.Id); err != nil { + return fmt.Errorf("failed to create DNS records: %w", err) + } + + return nil +} + +func getControlPlaneIDs(cl client.Client, replicas *int64, infraID string) ([]*string, error) { + res := []*string{} + total := int64(1) + if replicas != nil { + total = *replicas + } + for i := int64(0); i < total; i++ { + + key := client.ObjectKey{ + Name: fmt.Sprintf("%s-master-%d", infraID, i), + Namespace: capiutils.Namespace, + } + awsMachine := &capa.AWSMachine{} + if err := cl.Get(context.Background(), key, awsMachine); err != nil { + return nil, fmt.Errorf("failed to get AWSCluster: %w", err) + } + res = append(res, awsMachine.Spec.InstanceID) + } + + key := client.ObjectKey{ + Name: capiutils.GenerateBoostrapMachineName(infraID), + Namespace: capiutils.Namespace, + } + awsMachine := &capa.AWSMachine{} + if err := cl.Get(context.Background(), key, awsMachine); err != nil { + return nil, fmt.Errorf("failed to get AWSCluster: %w", err) + } + + res = append(res, awsMachine.Spec.InstanceID) + + return res, nil +} diff --git a/pkg/infrastructure/aws/clusterapi/dns.go b/pkg/infrastructure/aws/clusterapi/dns.go new file mode 100644 index 00000000000..703f77e3448 --- /dev/null +++ b/pkg/infrastructure/aws/clusterapi/dns.go @@ -0,0 +1,163 @@ +package clusterapi + 
+import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/route53/route53iface" + "github.com/sirupsen/logrus" + + "github.com/openshift/installer/pkg/asset/installconfig" + awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" +) + +func createDNSRecords(installConfig *installconfig.InstallConfig, apiTarget, apiIntTarget, phzID string) error { + logrus.Infof("Creating Route53 records for control plane load balancer") + ssn, err := installConfig.AWS.Session(context.TODO()) + if err != nil { + return fmt.Errorf("failed to create session: %w", err) + } + client := awsconfig.NewClient(ssn) + r53cfg := awsconfig.GetR53ClientCfg(ssn, installConfig.Config.AWS.HostedZoneRole) + err = client.CreateOrUpdateRecord(installConfig.Config, r53cfg, apiTarget, apiIntTarget, phzID) + if err != nil { + return fmt.Errorf("failed to create route53 records: %w", err) + } + logrus.Infof("Created Route53 records for control plane load balancer") + return nil +} + +func createHostedZone(ctx context.Context, client route53iface.Route53API, userTags map[string]string, infraID, name, vpcID, region string, isPrivate bool) (*route53.HostedZone, error) { + var res *route53.CreateHostedZoneOutput + + callRef := fmt.Sprintf("%d", time.Now().Unix()) + res, err := client.CreateHostedZoneWithContext(ctx, &route53.CreateHostedZoneInput{ + CallerReference: aws.String(callRef), + Name: aws.String(name), + HostedZoneConfig: &route53.HostedZoneConfig{ + PrivateZone: aws.Bool(isPrivate), + Comment: aws.String("Created by Openshift Installer"), + }, + VPC: &route53.VPC{ + VPCId: aws.String(vpcID), + VPCRegion: aws.String(region), + }, + }) + if err != nil { + return nil, fmt.Errorf("error creating private hosted zone: %w", err) + } + + if res == nil { + return nil, fmt.Errorf("unexpected output from hosted zone creation") + } + // Tag the hosted zone + tags := 
mergeTags(userTags, map[string]string{"Name": fmt.Sprintf("%s-int", infraID)}) + _, err = client.ChangeTagsForResourceWithContext(ctx, &route53.ChangeTagsForResourceInput{ + ResourceType: aws.String("hostedzone"), + ResourceId: res.HostedZone.Id, + AddTags: r53Tags(tags), + }) + if err != nil { + return nil, fmt.Errorf("failed to tag private hosted zone: %w", err) + } + logrus.Infoln("Tagged private hosted zone") + + // Set SOA minimum TTL + recordSet, err := existingRecordSet(ctx, client, res.HostedZone.Id, name, "SOA") + if err != nil { + return nil, fmt.Errorf("failed to find SOA record set for private zone: %w", err) + } + if len(recordSet.ResourceRecords) == 0 || recordSet.ResourceRecords[0] == nil || recordSet.ResourceRecords[0].Value == nil { + return nil, fmt.Errorf("failed to find SOA record for private zone") + } + record := recordSet.ResourceRecords[0] + fields := strings.Split(aws.StringValue(record.Value), " ") + if len(fields) != 7 { + return nil, fmt.Errorf("SOA record value has %d fields, expected 7", len(fields)) + } + fields[6] = "60" + record.Value = aws.String(strings.Join(fields, " ")) + _, err = client.ChangeResourceRecordSetsWithContext(ctx, &route53.ChangeResourceRecordSetsInput{ + HostedZoneId: res.HostedZone.Id, + ChangeBatch: &route53.ChangeBatch{ + Changes: []*route53.Change{ + { + Action: aws.String("UPSERT"), + ResourceRecordSet: recordSet, + }, + }, + }, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to set SOA TTL to minimum: %w", err) + } + + return res.HostedZone, nil +} + +func mergeTags(lhsTags, rhsTags map[string]string) map[string]string { + merged := make(map[string]string, len(lhsTags)+len(rhsTags)) + for k, v := range lhsTags { + merged[k] = v + } + for k, v := range rhsTags { + merged[k] = v + } + return merged +} + +func r53Tags(tags map[string]string) []*route53.Tag { + rtags := make([]*route53.Tag, 0, len(tags)) + for k, v := range tags { + k, v := k, v + rtags = append(rtags, &route53.Tag{ + Key: 
aws.String(k), + Value: aws.String(v), + }) + } + return rtags +} + +func existingRecordSet(ctx context.Context, client route53iface.Route53API, zoneID *string, recordName string, recordType string) (*route53.ResourceRecordSet, error) { + name := fqdn(strings.ToLower(recordName)) + input := &route53.ListResourceRecordSetsInput{ + HostedZoneId: zoneID, + StartRecordName: aws.String(name), + StartRecordType: aws.String(recordType), + MaxItems: aws.String("1"), + } + res, err := client.ListResourceRecordSetsWithContext(ctx, input) + if err != nil { + return nil, err + } + for _, rs := range res.ResourceRecordSets { + resName := strings.ToLower(cleanRecordName(aws.StringValue(rs.Name))) + resType := strings.ToUpper(aws.StringValue(rs.Type)) + if resName == name && resType == recordType { + return rs, nil + } + } + return nil, fmt.Errorf("not found") +} + +func fqdn(name string) string { + n := len(name) + if n == 0 || name[n-1] == '.' { + return name + } + return name + "." +} + +func cleanRecordName(name string) string { + s, err := strconv.Unquote(`"` + name + `"`) + if err != nil { + return name + } + return s +} diff --git a/pkg/infrastructure/aws/clusterapi/iam.go b/pkg/infrastructure/aws/clusterapi/iam.go new file mode 100644 index 00000000000..e4b48f78126 --- /dev/null +++ b/pkg/infrastructure/aws/clusterapi/iam.go @@ -0,0 +1,188 @@ +package clusterapi + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" + + "github.com/openshift/installer/pkg/asset/installconfig" +) + +var ( + policies = map[string]*iamv1.PolicyDocument{ + "master": { + Version: "2012-10-17", + Statement: []iamv1.StatementEntry{ + { + Effect: "Allow", + Action: []string{ + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + 
"ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:Describe*", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:Describe*", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "kms:DescribeKey", + }, + Resource: iamv1.Resources{ + "*", + }, + }, + }, + }, + "worker": { + Version: "2012-10-17", + Statement: []iamv1.StatementEntry{ + { + Effect: "Allow", + Action: iamv1.Actions{ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + }, + Resource: iamv1.Resources{"*"}, + }, + }, + }, + } +) + +// putIAMRoles creates the roles used by control-plane and compute nodes. 
+func putIAMRoles(clusterID string, ic *installconfig.InstallConfig) error { + logrus.Infoln("Creating IAM roles for control-plane and compute nodes") + // Create the IAM Role with the aws sdk. + // https://docs.aws.amazon.com/sdk-for-go/api/service/iam/#IAM.CreateRole + session, err := ic.AWS.Session(context.TODO()) + if err != nil { + return errors.Wrap(err, "failed to load AWS session") + } + svc := iam.New(session) + + // Create the IAM Roles for master and workers. + clusterOwnedIAMTag := &iam.Tag{ + Key: aws.String(fmt.Sprintf("kubernetes.io/cluster/%s", clusterID)), + Value: aws.String("owned"), + } + assumePolicy := &iamv1.PolicyDocument{ + Version: "2012-10-17", + Statement: iamv1.Statements{ + { + Effect: "Allow", + Principal: iamv1.Principals{ + iamv1.PrincipalService: []string{ + "ec2.amazonaws.com", + }, + }, + Action: iamv1.Actions{ + "sts:AssumeRole", + }, + }, + }, + } + assumePolicyBytes, err := json.Marshal(assumePolicy) + if err != nil { + return errors.Wrap(err, "failed to marshal assume policy") + } + + for _, role := range []string{"master", "worker"} { + roleName := aws.String(fmt.Sprintf("%s-%s-role", clusterID, role)) + if _, err := svc.GetRole(&iam.GetRoleInput{RoleName: roleName}); err != nil { + if aerr, ok := err.(awserr.Error); ok && aerr.Code() != iam.ErrCodeNoSuchEntityException { + return errors.Wrapf(err, "failed to get %s role", role) + } + // If the role does not exist, create it. + createRoleInput := &iam.CreateRoleInput{ + RoleName: roleName, + AssumeRolePolicyDocument: aws.String(string(assumePolicyBytes)), + Tags: []*iam.Tag{clusterOwnedIAMTag}, + } + if _, err := svc.CreateRole(createRoleInput); err != nil { + return errors.Wrapf(err, "failed to create %s role", role) + } + time.Sleep(10 * time.Second) + if err := svc.WaitUntilRoleExists(&iam.GetRoleInput{RoleName: roleName}); err != nil { + return errors.Wrapf(err, "failed to wait for %s role to exist", role) + } + } + + // Put the policy inline. 
+ policyName := aws.String(fmt.Sprintf("%s-%s-policy", clusterID, role)) + b, err := json.Marshal(policies[role]) + if err != nil { + return errors.Wrapf(err, "failed to marshal %s policy", role) + } + if _, err := svc.PutRolePolicy(&iam.PutRolePolicyInput{ + PolicyDocument: aws.String(string(b)), + PolicyName: policyName, + RoleName: roleName, + }); err != nil { + return errors.Wrapf(err, "failed to create inline policy for role %s", role) + } + + profileName := aws.String(fmt.Sprintf("%s-%s-profile", clusterID, role)) + if _, err := svc.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: profileName}); err != nil { + if aerr, ok := err.(awserr.Error); ok && aerr.Code() != iam.ErrCodeNoSuchEntityException { + return errors.Wrapf(err, "failed to get %s instance profile", role) + } + // If the profile does not exist, create it. + if _, err := svc.CreateInstanceProfile(&iam.CreateInstanceProfileInput{ + InstanceProfileName: profileName, + Tags: []*iam.Tag{clusterOwnedIAMTag}, + }); err != nil { + return errors.Wrapf(err, "failed to create %s instance profile", role) + } + time.Sleep(10 * time.Second) + if err := svc.WaitUntilInstanceProfileExists(&iam.GetInstanceProfileInput{InstanceProfileName: profileName}); err != nil { + return errors.Wrapf(err, "failed to wait for %s instance profile to exist", role) + } + + // Finally, attach the role to the profile. 
+ if _, err := svc.AddRoleToInstanceProfile(&iam.AddRoleToInstanceProfileInput{ + InstanceProfileName: profileName, + RoleName: roleName, + }); err != nil { + return errors.Wrapf(err, "failed to add %s role to instance profile", role) + } + } + } + + return nil +} diff --git a/pkg/infrastructure/aws/clusterapi/loadbalancer.go b/pkg/infrastructure/aws/clusterapi/loadbalancer.go new file mode 100644 index 00000000000..6dfb7c160dc --- /dev/null +++ b/pkg/infrastructure/aws/clusterapi/loadbalancer.go @@ -0,0 +1,65 @@ +package clusterapi + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go/service/elbv2/elbv2iface" + awsInfra "github.com/openshift/installer/pkg/infrastructure/aws" + "github.com/sirupsen/logrus" +) + +const ( + readyzPath = "/readyz" + healthzPath = "/healthz" + apiPort = 6443 + servicePort = 22623 +) + +func createExtLB(client elbv2iface.ELBV2API, subnets []string, tags map[string]string, infraID, vpcID string) (*elbv2.LoadBalancer, *elbv2.TargetGroup, error) { + logger := logrus.StandardLogger() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + lbName := fmt.Sprintf("%s-ext", infraID) + lb, err := awsInfra.EnsureLoadBalancer(ctx, logger, client, lbName, subnets, true, tags) + if err != nil { + return nil, nil, err + } + + // Create api-int target group + aextTGName := fmt.Sprintf("%s-aext", infraID) + aextTG, err := awsInfra.EnsureTargetGroup(ctx, logger, client, aextTGName, vpcID, readyzPath, apiPort, tags) + if err != nil { + return nil, nil, fmt.Errorf("failed to create external target group: %w", err) + } + + aextListenerName := fmt.Sprintf("%s-aext", infraID) + aextListener, err := awsInfra.CreateListener(ctx, client, aextListenerName, lb.LoadBalancerArn, aextTG.TargetGroupArn, apiPort, tags) + if err != nil { + return nil, nil, fmt.Errorf("failed to create external listener: %w", err) + } + + 
logger.WithField("arn", aws.StringValue(aextListener.ListenerArn)).Infoln("Created api listener") + + return lb, aextTG, nil +} + +func registerControlPlane(client elbv2iface.ELBV2API, ids []*string, tg *elbv2.TargetGroup) error { + targets := []*elbv2.TargetDescription{} + for _, id := range ids { + targets = append(targets, &elbv2.TargetDescription{Id: id}) + } + + _, err := client.RegisterTargetsWithContext(context.TODO(), &elbv2.RegisterTargetsInput{ + TargetGroupArn: tg.TargetGroupArn, + Targets: targets, + }) + if err != nil { + return fmt.Errorf("failed to register target group (%s): %w", *tg.TargetGroupName, err) + } + return nil +} diff --git a/pkg/infrastructure/aws/loadbalancer.go b/pkg/infrastructure/aws/loadbalancer.go index cf3a0bdbae6..2eef30c6e1e 100644 --- a/pkg/infrastructure/aws/loadbalancer.go +++ b/pkg/infrastructure/aws/loadbalancer.go @@ -81,14 +81,14 @@ func createLoadBalancers(ctx context.Context, logger logrus.FieldLogger, elbClie func (o *lbState) ensureInternalLoadBalancer(ctx context.Context, logger logrus.FieldLogger, client elbv2iface.ELBV2API, subnets []string, tags map[string]string) (*elbv2.LoadBalancer, error) { lbName := fmt.Sprintf("%s-int", o.input.infraID) - lb, err := ensureLoadBalancer(ctx, logger, client, lbName, subnets, false, tags) + lb, err := EnsureLoadBalancer(ctx, logger, client, lbName, subnets, false, tags) if err != nil { return nil, err } // Create internalA target group aTGName := fmt.Sprintf("%s-aint", o.input.infraID) - aTG, err := ensureTargetGroup(ctx, logger, client, aTGName, o.input.vpcID, readyzPath, apiPort, tags) + aTG, err := EnsureTargetGroup(ctx, logger, client, aTGName, o.input.vpcID, readyzPath, apiPort, tags) if err != nil { return nil, fmt.Errorf("failed to create internalA target group: %w", err) } @@ -96,7 +96,7 @@ func (o *lbState) ensureInternalLoadBalancer(ctx context.Context, logger logrus. 
// Create internalA listener aListenerName := fmt.Sprintf("%s-aint", o.input.infraID) - aListener, err := createListener(ctx, client, aListenerName, lb.LoadBalancerArn, aTG.TargetGroupArn, 6443, tags) + aListener, err := CreateListener(ctx, client, aListenerName, lb.LoadBalancerArn, aTG.TargetGroupArn, 6443, tags) if err != nil { return nil, fmt.Errorf("failed to create internalA listener: %w", err) } @@ -104,7 +104,7 @@ func (o *lbState) ensureInternalLoadBalancer(ctx context.Context, logger logrus. // Create internalS target group sTGName := fmt.Sprintf("%s-sint", o.input.infraID) - sTG, err := ensureTargetGroup(ctx, logger, client, sTGName, o.input.vpcID, healthzPath, servicePort, tags) + sTG, err := EnsureTargetGroup(ctx, logger, client, sTGName, o.input.vpcID, healthzPath, servicePort, tags) if err != nil { return nil, fmt.Errorf("failed to create internalS target group: %w", err) } @@ -112,7 +112,7 @@ func (o *lbState) ensureInternalLoadBalancer(ctx context.Context, logger logrus. // Create internalS listener sListenerName := fmt.Sprintf("%s-sint", o.input.infraID) - sListener, err := createListener(ctx, client, sListenerName, lb.LoadBalancerArn, sTG.TargetGroupArn, servicePort, tags) + sListener, err := CreateListener(ctx, client, sListenerName, lb.LoadBalancerArn, sTG.TargetGroupArn, servicePort, tags) if err != nil { return nil, fmt.Errorf("failed to create internalS listener: %w", err) } @@ -123,21 +123,21 @@ func (o *lbState) ensureInternalLoadBalancer(ctx context.Context, logger logrus. 
func (o *lbState) ensureExternalLoadBalancer(ctx context.Context, logger logrus.FieldLogger, client elbv2iface.ELBV2API, subnets []string, tags map[string]string) (*elbv2.LoadBalancer, error) { lbName := fmt.Sprintf("%s-ext", o.input.infraID) - lb, err := ensureLoadBalancer(ctx, logger, client, lbName, subnets, true, tags) + lb, err := EnsureLoadBalancer(ctx, logger, client, lbName, subnets, true, tags) if err != nil { return nil, err } // Create target group tgName := fmt.Sprintf("%s-aext", o.input.infraID) - tg, err := ensureTargetGroup(ctx, logger, client, tgName, o.input.vpcID, readyzPath, apiPort, tags) + tg, err := EnsureTargetGroup(ctx, logger, client, tgName, o.input.vpcID, readyzPath, apiPort, tags) if err != nil { return nil, fmt.Errorf("failed to create external target group: %w", err) } o.targetGroupArns.Insert(aws.StringValue(tg.TargetGroupArn)) listenerName := fmt.Sprintf("%s-aext", o.input.infraID) - listener, err := createListener(ctx, client, listenerName, lb.LoadBalancerArn, tg.TargetGroupArn, apiPort, tags) + listener, err := CreateListener(ctx, client, listenerName, lb.LoadBalancerArn, tg.TargetGroupArn, apiPort, tags) if err != nil { return nil, fmt.Errorf("failed to create external listener: %w", err) } @@ -146,7 +146,7 @@ func (o *lbState) ensureExternalLoadBalancer(ctx context.Context, logger logrus. 
return lb, nil } -func ensureLoadBalancer(ctx context.Context, logger logrus.FieldLogger, client elbv2iface.ELBV2API, lbName string, subnets []string, isPublic bool, tags map[string]string) (*elbv2.LoadBalancer, error) { +func EnsureLoadBalancer(ctx context.Context, logger logrus.FieldLogger, client elbv2iface.ELBV2API, lbName string, subnets []string, isPublic bool, tags map[string]string) (*elbv2.LoadBalancer, error) { l := logger.WithField("name", lbName) createdOrFoundMsg := "Found existing load balancer" lb, err := existingLoadBalancer(ctx, client, lbName) @@ -224,7 +224,7 @@ func existingLoadBalancer(ctx context.Context, client elbv2iface.ELBV2API, lbNam return nil, errNotFound } -func ensureTargetGroup(ctx context.Context, logger logrus.FieldLogger, client elbv2iface.ELBV2API, targetName string, vpcID string, healthCheckPath string, port int64, tags map[string]string) (*elbv2.TargetGroup, error) { +func EnsureTargetGroup(ctx context.Context, logger logrus.FieldLogger, client elbv2iface.ELBV2API, targetName string, vpcID string, healthCheckPath string, port int64, tags map[string]string) (*elbv2.TargetGroup, error) { l := logger.WithField("name", targetName) createdOrFoundMsg := "Found existing Target Group" tg, err := existingTargetGroup(ctx, client, targetName) @@ -278,7 +278,6 @@ func createTargetGroup(ctx context.Context, client elbv2iface.ELBV2API, targetNa Port: aws.Int64(port), Protocol: aws.String("TCP"), Tags: elbTags(ttags), - TargetType: aws.String("ip"), VpcId: aws.String(vpcID), } res, err := client.CreateTargetGroupWithContext(ctx, input) @@ -289,7 +288,7 @@ func createTargetGroup(ctx context.Context, client elbv2iface.ELBV2API, targetNa return res.TargetGroups[0], nil } -func createListener(ctx context.Context, client elbv2iface.ELBV2API, listenerName string, lbARN *string, tgARN *string, port int64, tags map[string]string) (*elbv2.Listener, error) { +func CreateListener(ctx context.Context, client elbv2iface.ELBV2API, listenerName string, 
lbARN *string, tgARN *string, port int64, tags map[string]string) (*elbv2.Listener, error) { ltags := mergeTags(tags, map[string]string{ "Name": listenerName, }) diff --git a/pkg/infrastructure/aws/provision_test.go b/pkg/infrastructure/aws/provision_test.go index f9ceea9ee40..64c96666038 100644 --- a/pkg/infrastructure/aws/provision_test.go +++ b/pkg/infrastructure/aws/provision_test.go @@ -3275,7 +3275,7 @@ func TestEnsureTargetGroup(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - res, err := ensureTargetGroup(context.TODO(), logger, &test.mockSvc, "tgName", "vpc-1", readyzPath, apiPort, map[string]string{}) + res, err := EnsureTargetGroup(context.TODO(), logger, &test.mockSvc, "tgName", "vpc-1", readyzPath, apiPort, map[string]string{}) if test.expectedErr == "" { assert.NoError(t, err) assert.Equal(t, test.expectedOut, res) @@ -3384,7 +3384,7 @@ func TestEnsureLoadBalancer(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - res, err := ensureLoadBalancer(context.TODO(), logger, &test.mockSvc, "lbName", []string{}, true, map[string]string{}) + res, err := EnsureLoadBalancer(context.TODO(), logger, &test.mockSvc, "lbName", []string{}, true, map[string]string{}) if test.expectedErr == "" { assert.NoError(t, err) assert.Equal(t, test.expectedOut, res) diff --git a/pkg/infrastructure/clusterapi/clusterapi.go b/pkg/infrastructure/clusterapi/clusterapi.go new file mode 100644 index 00000000000..ba334a292a8 --- /dev/null +++ b/pkg/infrastructure/clusterapi/clusterapi.go @@ -0,0 +1,322 @@ +package clusterapi + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + utilkubeconfig "sigs.k8s.io/cluster-api/util/kubeconfig" + 
"sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/cluster/metadata" + "github.com/openshift/installer/pkg/asset/ignition/bootstrap" + "github.com/openshift/installer/pkg/asset/ignition/machine" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/kubeconfig" + "github.com/openshift/installer/pkg/asset/manifests/capiutils" + capimanifests "github.com/openshift/installer/pkg/asset/manifests/clusterapi" + "github.com/openshift/installer/pkg/clusterapi" + "github.com/openshift/installer/pkg/infrastructure" + "github.com/openshift/installer/pkg/types" +) + +// InfraProvider is the base implementation for provisioning cluster +// infrastructure using CAPI. Platforms should embed this struct and +// implement the hooks defined by +// the Provider interface. +type InfraProvider struct { + infrastructure.Provider + + capiProvider Provider +} + +// InitializeProvider returns a CAPI provider implementation for a specific +// cloud platform. +func InitializeProvider(platform Provider) infrastructure.Provider { + return InfraProvider{capiProvider: platform} +} + +// Provider provides an interface for calling functions at different +// points of the CAPI infrastructure provisioning lifecycle. +type Provider interface { + // PreProvision is called before provisioning using CAPI controllers has begun, + // and should be used to create dependencies needed for CAPI provisioning, + // such as IAM roles or policies. + PreProvision(in PreProvisionInput) error + + // Ignition generates the ignition secrets for bootstrap and control-plane machines + // and handles any preconditions for ignition. + Ignition(in IgnitionInput) ([]client.Object, error) + + // ControlPlaneAvailable is called once cluster.Spec.ControlPlaneEndpoint.IsValid() + // returns true, typically after load balancers have been provisioned. 
It can be used + // to create DNS records. + ControlPlaneAvailable(in ControlPlaneAvailableInput) error +} + +type PreProvisionInput struct { + ClusterID string + InstallConfig *installconfig.InstallConfig +} + +type IgnitionInput struct { + MasterIgnData, BootstrapIgnData []byte + InfraID string +} + +type ControlPlaneAvailableInput struct { + Cluster *clusterv1.Cluster + InstallConfig *installconfig.InstallConfig + Client client.Client + InfraID string +} + +// TODO(padillon): switch to pointer receiver. +// Provision creates cluster resources by applying CAPI manifests to a locally running control plane. +func (i InfraProvider) Provision(dir string, parents asset.Parents) ([]*asset.File, error) { + capiManifestsAsset := &capimanifests.Cluster{} + clusterKubeconfigAsset := &kubeconfig.AdminClient{} + clusterID := &installconfig.ClusterID{} + installConfig := &installconfig.InstallConfig{} + bootstrapIgnAsset := &bootstrap.Bootstrap{} + masterIgnAsset := &machine.Master{} + parents.Get( + capiManifestsAsset, + clusterKubeconfigAsset, + clusterID, + installConfig, + bootstrapIgnAsset, + masterIgnAsset, + ) + + fileList := []*asset.File{} + manifests := []client.Object{} + for _, m := range capiManifestsAsset.RuntimeFiles() { + manifests = append(manifests, m.Object) + } + + preProvisionInput := PreProvisionInput{ + ClusterID: clusterID.InfraID, + InstallConfig: installConfig, + } + if err := i.capiProvider.PreProvision(preProvisionInput); err != nil { + return fileList, fmt.Errorf("failed during pre-provisioning: %w", err) + } + + ignInput := IgnitionInput{ + BootstrapIgnData: bootstrapIgnAsset.Files()[0].Data, + MasterIgnData: masterIgnAsset.Files()[0].Data, + InfraID: clusterID.InfraID, + } + ignSecrets, err := i.capiProvider.Ignition(ignInput) + if err != nil { + return fileList, fmt.Errorf("failed during ignition secret generation: %w", err) + } + manifests = append(manifests, ignSecrets...) 
+ + // TODO(vincepri): The context should be passed down from the caller, + // although today the Asset interface doesn't allow it, refactor once it does. + ctx, cancel := context.WithCancel(signals.SetupSignalHandler()) + go func() { + <-ctx.Done() + cancel() + clusterapi.System().Teardown() + }() + // Run the CAPI system. + capiSystem := clusterapi.System() + if err := capiSystem.Run(ctx, installConfig); err != nil { + return fileList, fmt.Errorf("failed to run cluster api system: %w", err) + } + + // Grab the client. + cl := capiSystem.Client() + + // Create all the manifests and store them. + for _, m := range manifests { + m.SetNamespace(capiutils.Namespace) + if err := cl.Create(context.Background(), m); err != nil { + return fileList, fmt.Errorf("failed to create manifest: %w", err) + } + logrus.Infof("Created manifest %+T, namespace=%s name=%s", m, m.GetNamespace(), m.GetName()) + } + + // Pass cluster kubeconfig and store it in; this is usually the role of a bootstrap provider. + { + key := client.ObjectKey{ + Name: clusterID.InfraID, + Namespace: capiutils.Namespace, + } + cluster := &clusterv1.Cluster{} + if err := cl.Get(context.Background(), key, cluster); err != nil { + // TODO (padillon): from this point forward statuses could be + // collected from the manifests appended to the fileList. + return fileList, err + } + // Create the secret. + clusterKubeconfig := clusterKubeconfigAsset.Files()[0].Data + secret := utilkubeconfig.GenerateSecret(cluster, clusterKubeconfig) + if err := cl.Create(context.Background(), secret); err != nil { + return fileList, err + } + } + + // Wait for the load balancer to be ready by checking the control plane endpoint + // on the cluster object. 
+ var cluster *clusterv1.Cluster + { + if err := wait.ExponentialBackoff(wait.Backoff{ + Duration: time.Second * 10, + Factor: float64(1.5), + Steps: 32, + }, func() (bool, error) { + c := &clusterv1.Cluster{} + if err := cl.Get(context.Background(), client.ObjectKey{ + Name: clusterID.InfraID, + Namespace: capiutils.Namespace, + }, c); err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + cluster = c + return cluster.Spec.ControlPlaneEndpoint.IsValid(), nil + }); err != nil { + return fileList, err + } + if cluster == nil { + return fileList, errors.New("error occurred during load balancer ready check") + } + if cluster.Spec.ControlPlaneEndpoint.Host == "" { + return fileList, errors.New("control plane endpoint is not set") + } + } + + controlPlaneAvailableInput := ControlPlaneAvailableInput{ + Cluster: cluster, + InstallConfig: installConfig, + Client: cl, + InfraID: clusterID.InfraID, + } + if err := i.capiProvider.ControlPlaneAvailable(controlPlaneAvailableInput); err != nil { + return fileList, fmt.Errorf("failed provisioning resources after control plane available: %w", err) + } + + // For each manifest we created, retrieve it and store it in the asset. 
+ for _, m := range manifests { + key := client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNamespace(), + } + if err := cl.Get(context.Background(), key, m); err != nil { + return fileList, fmt.Errorf("failed to get manifest: %w", err) + } + + gvk, err := cl.GroupVersionKindFor(m) + if err != nil { + return fileList, fmt.Errorf("failed to get GVK for manifest: %w", err) + } + fileName := fmt.Sprintf("%s-%s-%s.yaml", gvk.Kind, m.GetNamespace(), m.GetName()) + objData, err := yaml.Marshal(m) + if err != nil { + errMsg := fmt.Sprintf("failed to create infrastructure manifest %s from InstallConfig", fileName) + return fileList, errors.Wrapf(err, errMsg) + } + fileList = append(fileList, &asset.File{ + Filename: fileName, + Data: objData, + }) + } + + logrus.Infof("Cluster API resources have been created. Waiting for cluster to become ready...") + return fileList, nil +} + +// DestroyBootstrap destroys the temporary bootstrap resources. +func (i InfraProvider) DestroyBootstrap(dir string) error { + metadata, err := metadata.Load(dir) + if err != nil { + return err + } + + // TODO(padillon): start system if not running + if sys := clusterapi.System(); sys.State() == clusterapi.SystemStateRunning { + if err := sys.Client().Delete(context.TODO(), &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: capiutils.GenerateBoostrapMachineName(metadata.InfraID), + Namespace: capiutils.Namespace, + }, + }); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("failed to delete bootstrap machine: %w", err) + } + } + return nil +} + +// ExtractHostAddresses extracts the IPs of the bootstrap and control plane machines. 
+func (i InfraProvider) ExtractHostAddresses(dir string, config *types.InstallConfig, ha *infrastructure.HostAddresses) error { + return nil +} + +type DefaultCAPIProvider struct { + Provider +} + +func (d DefaultCAPIProvider) PreProvision(in PreProvisionInput) error { + logrus.Debugf("Default PreProvision: doing nothing") + return nil +} + +func (d DefaultCAPIProvider) Ignition(in IgnitionInput) ([]client.Object, error) { + logrus.Debugf("Using default ignition secret generation") + ignSecrets := []client.Object{} + masterIgn := string(in.MasterIgnData) + bootstrapIgn, err := injectInstallInfo(in.BootstrapIgnData) + if err != nil { + return nil, fmt.Errorf("unable to inject installation info: %w", err) + } + ignSecrets = append(ignSecrets, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", in.InfraID, "master"), + Namespace: capiutils.Namespace, + Labels: map[string]string{ + "cluster.x-k8s.io/cluster-name": in.InfraID, + }, + }, + Data: map[string][]byte{ + "format": []byte("ignition"), + "value": []byte(masterIgn), + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", in.InfraID, "bootstrap"), + Namespace: capiutils.Namespace, + Labels: map[string]string{ + "cluster.x-k8s.io/cluster-name": in.InfraID, + }, + }, + Data: map[string][]byte{ + "format": []byte("ignition"), + "value": []byte(bootstrapIgn), + }, + }, + ) + return ignSecrets, nil +} + +func (d DefaultCAPIProvider) ControlPlaneAvailable(in ControlPlaneAvailableInput) error { + logrus.Debugf("Default ControlPlaneAvailable, doing nothing") + return nil +} diff --git a/pkg/asset/manifests/clusterapi/helpers.go b/pkg/infrastructure/clusterapi/helpers.go similarity index 100% rename from pkg/asset/manifests/clusterapi/helpers.go rename to pkg/infrastructure/clusterapi/helpers.go diff --git a/pkg/infrastructure/platform/platform.go b/pkg/infrastructure/platform/platform.go index d28b082ed08..fe35c49fb02 100644 --- 
a/pkg/infrastructure/platform/platform.go +++ b/pkg/infrastructure/platform/platform.go @@ -9,6 +9,8 @@ import ( configv1 "github.com/openshift/api/config/v1" "github.com/openshift/installer/pkg/infrastructure" awsinfra "github.com/openshift/installer/pkg/infrastructure/aws" + awscapi "github.com/openshift/installer/pkg/infrastructure/aws/clusterapi" + "github.com/openshift/installer/pkg/infrastructure/clusterapi" "github.com/openshift/installer/pkg/terraform" "github.com/openshift/installer/pkg/terraform/stages/alibabacloud" "github.com/openshift/installer/pkg/terraform/stages/aws" @@ -45,6 +47,9 @@ func ProviderForPlatform(platform string, fg featuregates.FeatureGate) (infrastr case alibabacloudtypes.Name: return terraform.InitializeProvider(alibabacloud.PlatformStages), nil case awstypes.Name: + if fg.Enabled(configv1.FeatureGateClusterAPIInstall) { + return clusterapi.InitializeProvider(awscapi.Provider{}), nil + } if fg.Enabled(configv1.FeatureGateInstallAlternateInfrastructureAWS) { return awsinfra.InitializeProvider(), nil } diff --git a/pkg/infrastructure/platform/platform_altinfra.go b/pkg/infrastructure/platform/platform_altinfra.go index 79c2c31fb4f..dcf2bb44c81 100644 --- a/pkg/infrastructure/platform/platform_altinfra.go +++ b/pkg/infrastructure/platform/platform_altinfra.go @@ -6,8 +6,11 @@ package platform import ( "fmt" + configv1 "github.com/openshift/api/config/v1" "github.com/openshift/installer/pkg/infrastructure" "github.com/openshift/installer/pkg/infrastructure/aws" + awsinfra "github.com/openshift/installer/pkg/infrastructure/aws" + "github.com/openshift/installer/pkg/infrastructure/clusterapi" awstypes "github.com/openshift/installer/pkg/types/aws" azuretypes "github.com/openshift/installer/pkg/types/azure" "github.com/openshift/installer/pkg/types/featuregates" @@ -18,6 +21,9 @@ import ( func ProviderForPlatform(platform string, fg featuregates.FeatureGate) (infrastructure.Provider, error) { switch platform { case awstypes.Name: + if 
fg.Enabled(configv1.FeatureGateClusterAPIInstall) { + return clusterapi.InitializeProvider(awsinfra.CAPIInfraHelper{}), nil + } + return aws.InitializeProvider(), nil case azuretypes.Name: panic("not implemented") diff --git a/pkg/infrastructure/provider.go b/pkg/infrastructure/provider.go index a0f1d7e5443..135807d554f 100644 --- a/pkg/infrastructure/provider.go +++ b/pkg/infrastructure/provider.go @@ -10,9 +10,9 @@ import ( type Provider interface { // Provision creates the infrastructure resources for the stage. // dir: the path of the install dir - // vars: cluster configuration input variables, such as terraform variables files + // parents: the parent assets, which can be used to obtain any cluster asset dependencies // returns a slice of File assets, which will be appended to the cluster asset file list. - Provision(dir string, vars []*asset.File) ([]*asset.File, error) + Provision(dir string, parents asset.Parents) ([]*asset.File, error) // DestroyBootstrap destroys the temporary bootstrap resources. DestroyBootstrap(dir string) error diff --git a/pkg/terraform/terraform.go b/pkg/terraform/terraform.go index 536779e053b..f2f6f0589e2 100644 --- a/pkg/terraform/terraform.go +++ b/pkg/terraform/terraform.go @@ -12,6 +12,7 @@ import ( "github.com/sirupsen/logrus" "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/cluster/tfvars" "github.com/openshift/installer/pkg/infrastructure" "github.com/openshift/installer/pkg/lineprinter" "github.com/openshift/installer/pkg/metrics/timer" @@ -35,7 +36,11 @@ func InitializeProvider(stages []Stage) infrastructure.Provider { // Provision implements pkg/infrastructure/provider.Provision. Provision iterates // through each of the stages and applies the Terraform config for the stage. 
-func (p *Provider) Provision(dir string, vars []*asset.File) ([]*asset.File, error) { +func (p *Provider) Provision(dir string, parents asset.Parents) ([]*asset.File, error) { + tfVars := &tfvars.TerraformVariables{} + parents.Get(tfVars) + vars := tfVars.Files() + fileList := []*asset.File{} terraformDir := filepath.Join(dir, "terraform") if err := os.Mkdir(terraformDir, 0777); err != nil { diff --git a/vendor/modules.txt b/vendor/modules.txt index 7639868a9ea..596d262849c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -9734,6 +9734,7 @@ sigs.k8s.io/cluster-api/util/version sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2 sigs.k8s.io/cluster-api-provider-aws/v2/feature +sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1 # sigs.k8s.io/cluster-api-provider-azure v0.0.0-00010101000000-000000000000 => sigs.k8s.io/cluster-api-provider-azure v1.11.1-0.20231026140308-a3f4914170d9 ## explicit; go 1.20 sigs.k8s.io/cluster-api-provider-azure/api/v1beta1 diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1/types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1/types.go new file mode 100644 index 00000000000..31479690664 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1/types.go @@ -0,0 +1,168 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=iam.aws.infrastructure.cluster.x-k8s.io +// +gencrdrefdocs:force +package v1beta1 + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +type ( + // Effect defines an AWS IAM effect. + Effect string + + // ConditionOperator defines an AWS condition operator. + ConditionOperator string + + // PrincipalType defines an AWS principle type. + PrincipalType string +) + +const ( + + // Any is the AWS IAM policy grammar wildcard. + Any = "*" + + // CurrentVersion is the latest version of the AWS IAM policy grammar. + CurrentVersion = "2012-10-17" + + // EffectAllow is the Allow effect in an AWS IAM policy statement entry. + EffectAllow Effect = "Allow" + + // EffectDeny is the Deny effect in an AWS IAM policy statement entry. + EffectDeny Effect = "Deny" + + // PrincipalAWS is the identity type covering AWS ARNs. + PrincipalAWS PrincipalType = "AWS" + + // PrincipalFederated is the identity type covering federated identities. + PrincipalFederated PrincipalType = "Federated" + + // PrincipalService is the identity type covering AWS services. + PrincipalService PrincipalType = "Service" + + // StringEquals is an AWS IAM policy condition operator. + StringEquals ConditionOperator = "StringEquals" + + // StringNotEquals is an AWS IAM policy condition operator. + StringNotEquals ConditionOperator = "StringNotEquals" + + // StringEqualsIgnoreCase is an AWS IAM policy condition operator. + StringEqualsIgnoreCase ConditionOperator = "StringEqualsIgnoreCase" + + // StringLike is an AWS IAM policy condition operator. + StringLike ConditionOperator = "StringLike" + + // StringNotLike is an AWS IAM policy condition operator. + StringNotLike ConditionOperator = "StringNotLike" + + // DefaultNameSuffix is the default suffix appended to all AWS IAM roles created by clusterawsadm. 
+ DefaultNameSuffix = ".cluster-api-provider-aws.sigs.k8s.io" +) + +// PolicyDocument represents an AWS IAM policy document, and can be +// converted into JSON using "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/converters". +type PolicyDocument struct { + Version string `json:"Version,omitempty"` + Statement Statements `json:"Statement,omitempty"` + ID string `json:"Id,omitempty"` +} + +// StatementEntry represents each "statement" block in an AWS IAM policy document. +type StatementEntry struct { + Sid string `json:",omitempty"` + Principal Principals `json:",omitempty"` + NotPrincipal Principals `json:",omitempty"` + Effect Effect `json:"Effect"` + Action Actions `json:"Action"` + Resource Resources `json:",omitempty"` + Condition Conditions `json:"Condition,omitempty"` +} + +// Statements is the list of StatementEntries. +type Statements []StatementEntry + +// Principals is the map of all identities a statement entry refers to. +type Principals map[PrincipalType]PrincipalID + +// Actions is the list of actions. +type Actions []string + +// UnmarshalJSON is an Actions Unmarshaler. +func (actions *Actions) UnmarshalJSON(data []byte) error { + var ids []string + if err := json.Unmarshal(data, &ids); err == nil { + *actions = Actions(ids) + return nil + } + var id string + if err := json.Unmarshal(data, &id); err != nil { + return errors.Wrap(err, "couldn't unmarshal as either []string or string") + } + *actions = []string{id} + return nil +} + +// Resources is the list of resources. +type Resources []string + +// PrincipalID represents the list of all identities, such as ARNs. +type PrincipalID []string + +// UnmarshalJSON defines an Unmarshaler for a PrincipalID. 
+func (identityID *PrincipalID) UnmarshalJSON(data []byte) error { + var ids []string + if err := json.Unmarshal(data, &ids); err == nil { + *identityID = PrincipalID(ids) + return nil + } + var id string + if err := json.Unmarshal(data, &id); err != nil { + return errors.Wrap(err, "couldn't unmarshal as either []string or string") + } + *identityID = []string{id} + return nil +} + +// Conditions is the map of all conditions in the statement entry. +type Conditions map[ConditionOperator]interface{} + +// DeepCopyInto copies the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy copies the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..58105301ab9 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,209 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import () + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Actions) DeepCopyInto(out *Actions) { + { + in := &in + *out = make(Actions, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Actions. +func (in Actions) DeepCopy() Actions { + if in == nil { + return nil + } + out := new(Actions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyDocument) DeepCopyInto(out *PolicyDocument) { + *out = *in + if in.Statement != nil { + in, out := &in.Statement, &out.Statement + *out = make(Statements, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyDocument. +func (in *PolicyDocument) DeepCopy() *PolicyDocument { + if in == nil { + return nil + } + out := new(PolicyDocument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PrincipalID) DeepCopyInto(out *PrincipalID) { + { + in := &in + *out = make(PrincipalID, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrincipalID. +func (in PrincipalID) DeepCopy() PrincipalID { + if in == nil { + return nil + } + out := new(PrincipalID) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in Principals) DeepCopyInto(out *Principals) { + { + in := &in + *out = make(Principals, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(PrincipalID, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Principals. +func (in Principals) DeepCopy() Principals { + if in == nil { + return nil + } + out := new(Principals) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Resources) DeepCopyInto(out *Resources) { + { + in := &in + *out = make(Resources, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources. +func (in Resources) DeepCopy() Resources { + if in == nil { + return nil + } + out := new(Resources) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatementEntry) DeepCopyInto(out *StatementEntry) { + *out = *in + if in.Principal != nil { + in, out := &in.Principal, &out.Principal + *out = make(Principals, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(PrincipalID, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.NotPrincipal != nil { + in, out := &in.NotPrincipal, &out.NotPrincipal + *out = make(Principals, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(PrincipalID, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make(Actions, len(*in)) + copy(*out, *in) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = make(Resources, len(*in)) + copy(*out, *in) + } + out.Condition = in.Condition.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatementEntry. +func (in *StatementEntry) DeepCopy() *StatementEntry { + if in == nil { + return nil + } + out := new(StatementEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Statements) DeepCopyInto(out *Statements) { + { + in := &in + *out = make(Statements, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Statements. +func (in Statements) DeepCopy() Statements { + if in == nil { + return nil + } + out := new(Statements) + in.DeepCopyInto(out) + return *out +}