From f7d3575ffe271bb5958e1ea548e05c87ee71a0be Mon Sep 17 00:00:00 2001 From: Dario Tranchitella Date: Fri, 4 Aug 2023 15:08:50 +0200 Subject: [PATCH 1/3] feat: allowing nodes creation when cp is externally managed Signed-off-by: Dario Tranchitella --- config/rbac/role.yaml | 8 ++++++++ controllers/awsmachine_controller.go | 27 +++++++++++++++++++++++++++ pkg/cloud/scope/machine.go | 18 ++++++++++++++---- pkg/cloud/services/ec2/instances.go | 2 +- 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index c296d7364b..c511d27b77 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -119,6 +119,14 @@ rules: - get - list - watch +- apiGroups: + - controlplane.cluster.x-k8s.io + resources: + - '*' + verbs: + - get + - list + - watch - apiGroups: - controlplane.cluster.x-k8s.io resources: diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 32a0863cdb..9c47adab1a 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -32,6 +32,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -60,6 +61,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -200,10 +202,16 @@ func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) infrav1.SetDefaults_AWSMachineSpec(&awsMachine.Spec) + cp, err := r.getControlPlane(ctx, log, cluster) + if err != nil { + return ctrl.Result{}, err + } + // Create the machine scope machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: r.Client, Cluster: cluster, + ControlPlane: cp, Machine: machine, InfraCluster: infraCluster, AWSMachine: awsMachine, @@ -1197,3 +1205,22 @@ func (r *AWSMachineReconciler) ensureInstanceMetadataOptions(ec2svc services.EC2 return ec2svc.ModifyInstanceMetadataOptions(instance.ID, machine.Spec.InstanceMetadataOptions) } + +// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch + +func (r *AWSMachineReconciler) getControlPlane(ctx context.Context, log *logger.Logger, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) { + var ns string + + if ns = cluster.Spec.ControlPlaneRef.Namespace; ns == "" { + ns = cluster.Namespace + } + + controlPlane, err := external.Get(ctx, r.Client, cluster.Spec.ControlPlaneRef, ns) + if err != nil { + log.Error(err, "unable to get ControlPlane referenced in the given cluster", "cluster", fmt.Sprintf("%s/%s", cluster.Namespace, cluster.Name)) + + return nil, err + } + + return controlPlane, nil +} diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go index ee98c78292..fcb735c22e 100644 --- a/pkg/cloud/scope/machine.go +++ b/pkg/cloud/scope/machine.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -43,6 +44,7 @@ import ( type MachineScopeParams struct { Client client.Client Logger *logger.Logger + ControlPlane 
*unstructured.Unstructured Cluster *clusterv1.Cluster Machine *clusterv1.Machine InfraCluster EC2Scope @@ -67,6 +69,9 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { if params.InfraCluster == nil { return nil, errors.New("aws cluster is required when creating a MachineScope") } + if params.ControlPlane == nil { + return nil, errors.New("cluster control plane is required when creating a MachineScope") + } if params.Logger == nil { log := klog.Background() @@ -78,10 +83,10 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { return nil, errors.Wrap(err, "failed to init patch helper") } return &MachineScope{ - Logger: *params.Logger, - client: params.Client, - patchHelper: helper, - + Logger: *params.Logger, + client: params.Client, + patchHelper: helper, + ControlPlane: params.ControlPlane, Cluster: params.Cluster, Machine: params.Machine, InfraCluster: params.InfraCluster, @@ -97,6 +102,7 @@ type MachineScope struct { Cluster *clusterv1.Cluster Machine *clusterv1.Machine + ControlPlane *unstructured.Unstructured InfraCluster EC2Scope AWSMachine *infrav1.AWSMachine } @@ -371,6 +377,10 @@ func (m *MachineScope) IsEKSManaged() bool { return m.InfraCluster.InfraCluster().GetObjectKind().GroupVersionKind().Kind == ekscontrolplanev1.AWSManagedControlPlaneKind } +func (m *MachineScope) IsControlPlaneExternallyManaged() bool { + return util.IsExternalManagedControlPlane(m.ControlPlane) +} + // IsExternallyManaged checks if the machine is externally managed. func (m *MachineScope) IsExternallyManaged() bool { return annotations.IsExternallyManaged(m.InfraCluster.InfraCluster()) diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index ad9a746a8d..1fbcce3c90 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -181,7 +181,7 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use } input.SubnetID = subnetID - if !scope.IsExternallyManaged() && !scope.IsEKSManaged() && s.scope.Network().APIServerELB.DNSName == "" { + if !scope.IsControlPlaneExternallyManaged() && !scope.IsExternallyManaged() && !scope.IsEKSManaged() && s.scope.Network().APIServerELB.DNSName == "" { record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", "Failed to run controlplane, APIServer ELB not available") return nil, awserrors.NewFailedDependency("failed to run controlplane, APIServer ELB not available") From 4ee9c54d8e50b688ab62365d08dd2bf8d1bae28c Mon Sep 17 00:00:00 2001 From: Dario Tranchitella Date: Fri, 4 Aug 2023 15:09:49 +0200 Subject: [PATCH 2/3] feat: disabled load balancer enum for externally managed LBs Signed-off-by: Dario Tranchitella --- api/v1beta2/awscluster_types.go | 11 +- api/v1beta2/awscluster_webhook.go | 44 +++++++ api/v1beta2/awscluster_webhook_test.go | 121 ++++++++++++++++++ api/v1beta2/conditions_consts.go | 3 + ...tructure.cluster.x-k8s.io_awsclusters.yaml | 2 + ....cluster.x-k8s.io_awsclustertemplates.yaml | 2 + controllers/awscluster_controller.go | 92 +++++++++---- controllers/awscluster_controller_test.go | 112 ++++++++++++++++ controllers/awsmachine_controller_test.go | 2 + .../awsmachine_controller_unit_test.go | 32 +++-- controllers/suite_test.go | 2 + pkg/cloud/scope/machine_test.go | 11 +- pkg/cloud/services/ec2/instances_test.go | 2 + .../services/secretsmanager/secret_test.go | 2 + pkg/cloud/services/ssm/secret_test.go | 2 + 15 files changed, 398 insertions(+), 42 deletions(-) diff --git a/api/v1beta2/awscluster_types.go 
b/api/v1beta2/awscluster_types.go index 05b3887b2d..8043186085 100644 --- a/api/v1beta2/awscluster_types.go +++ b/api/v1beta2/awscluster_types.go @@ -167,10 +167,11 @@ type Bastion struct { type LoadBalancerType string var ( - LoadBalancerTypeClassic = LoadBalancerType("classic") - LoadBalancerTypeELB = LoadBalancerType("elb") - LoadBalancerTypeALB = LoadBalancerType("alb") - LoadBalancerTypeNLB = LoadBalancerType("nlb") + LoadBalancerTypeClassic = LoadBalancerType("classic") + LoadBalancerTypeELB = LoadBalancerType("elb") + LoadBalancerTypeALB = LoadBalancerType("alb") + LoadBalancerTypeNLB = LoadBalancerType("nlb") + LoadBalancerTypeDisabled = LoadBalancerType("disabled") ) // AWSLoadBalancerSpec defines the desired state of an AWS load balancer. @@ -229,7 +230,7 @@ type AWSLoadBalancerSpec struct { // LoadBalancerType sets the type for a load balancer. The default type is classic. // +kubebuilder:default=classic - // +kubebuilder:validation:Enum:=classic;elb;alb;nlb + // +kubebuilder:validation:Enum:=classic;elb;alb;nlb;disabled LoadBalancerType LoadBalancerType `json:"loadBalancerType,omitempty"` // DisableHostsRewrite disabled the hair pinning issue solution that adds the NLB's address as 127.0.0.1 to the hosts diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index bbc3eb6a8f..4e1a2dbb12 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -298,5 +298,49 @@ func (r *AWSCluster) validateControlPlaneLBs() field.ErrorList { } } + if r.Spec.ControlPlaneLoadBalancer.LoadBalancerType == LoadBalancerTypeDisabled { + if r.Spec.ControlPlaneLoadBalancer.Name != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "name"), r.Spec.ControlPlaneLoadBalancer.Name, "cannot configure a name if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "crossZoneLoadBalancing"), r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing, "cross-zone load balancing cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if len(r.Spec.ControlPlaneLoadBalancer.Subnets) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "subnets"), r.Spec.ControlPlaneLoadBalancer.Subnets, "subnets cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "healthCheckProtocol"), r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol, "healthcheck protocol cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if len(r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalSecurityGroups"), r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups, "additional Security Groups cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if len(r.Spec.ControlPlaneLoadBalancer.AdditionalListeners) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalListeners"), r.Spec.ControlPlaneLoadBalancer.AdditionalListeners, "cannot set additional listeners if the LoadBalancer reconciliation is disabled")) + } + + if len(r.Spec.ControlPlaneLoadBalancer.IngressRules) > 
0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "ingress rules cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.PreserveClientIP { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "preserveClientIP"), r.Spec.ControlPlaneLoadBalancer.PreserveClientIP, "cannot preserve client IP if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "disableHostsRewrite"), r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite, "cannot disable hosts rewrite if the LoadBalancer reconciliation is disabled")) + } + } + + for _, rule := range r.Spec.ControlPlaneLoadBalancer.IngressRules { + if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) + } + } + return allErrs } diff --git a/api/v1beta2/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go index d0883a61ff..85342552c6 100644 --- a/api/v1beta2/awscluster_webhook_test.go +++ b/api/v1beta2/awscluster_webhook_test.go @@ -26,6 +26,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" @@ -50,6 +51,126 @@ func TestAWSClusterValidateCreate(t *testing.T) { wantErr bool expect func(g *WithT, res *AWSLoadBalancerSpec) }{ + { + name: "No options are allowed when LoadBalancer is disabled (name)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + LoadBalancerType: LoadBalancerTypeDisabled, + Name: ptr.To("name"), + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled (crossZoneLoadBalancing)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + CrossZoneLoadBalancing: true, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled (subnets)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + Subnets: []string{"foo", "bar"}, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled (healthCheckProtocol)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + HealthCheckProtocol: &ELBProtocolTCP, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled (additionalSecurityGroups)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + AdditionalSecurityGroups: []string{"foo", "bar"}, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled 
(additionalListeners)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + AdditionalListeners: []AdditionalListenerSpec{ + { + Port: 6443, + Protocol: ELBProtocolTCP, + }, + }, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled (ingressRules)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + IngressRules: []IngressRule{ + { + Description: "ingress rule", + Protocol: SecurityGroupProtocolTCP, + FromPort: 6443, + ToPort: 6443, + }, + }, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled (disableHostsRewrite)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + DisableHostsRewrite: true, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, + { + name: "No options are allowed when LoadBalancer is disabled (preserveClientIP)", + cluster: &AWSCluster{ + Spec: AWSClusterSpec{ + ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ + PreserveClientIP: true, + LoadBalancerType: LoadBalancerTypeDisabled, + }, + }, + }, + wantErr: true, + }, // The SSHKeyName tests were moved to sshkeyname_test.go { name: "Supported schemes are 'internet-facing, Internet-facing, internal, or nil', rest will be rejected", diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index 9cd1870a99..bfbb96c77a 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -125,6 +125,9 @@ const ( LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated. WaitForDNSNameReason = "WaitForDNSName" + // WaitForExternalControlPlaneEndpointReason is available when the AWS Cluster is waiting for an externally managed + // Load Balancer, such as an external Control Plane provider. + WaitForExternalControlPlaneEndpointReason = "WaitForExternalControlPlaneEndpoint" // WaitForDNSNameResolveReason used while waiting for DNS name to resolve. WaitForDNSNameResolveReason = "WaitForDNSNameResolve" // LoadBalancerFailedReason used when an error occurs during load balancer reconciliation. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index e23601b183..470eabea8b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -1109,6 +1109,7 @@ spec: - elb - alb - nlb + - disabled type: string name: description: Name sets the name of the classic ELB load balancer. @@ -1689,6 +1690,7 @@ spec: - elb - alb - nlb + - disabled type: string name: description: Name sets the name of the classic ELB load balancer. 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml index 254e09dc86..25a5d63c16 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml @@ -707,6 +707,7 @@ spec: - elb - alb - nlb + - disabled type: string name: description: Name sets the name of the classic ELB load @@ -1316,6 +1317,7 @@ spec: - elb - alb - nlb + - disabled type: string name: description: Name sets the name of the classic ELB load diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index a3e0368d03..a6cfc81aed 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -266,6 +266,45 @@ func (r *AWSClusterReconciler) reconcileDelete(ctx context.Context, clusterScope return nil } +func (r *AWSClusterReconciler) reconcileLoadBalancer(clusterScope *scope.ClusterScope, awsCluster *infrav1.AWSCluster) (*time.Duration, error) { + retryAfterDuration := 15 * time.Second + if clusterScope.AWSCluster.Spec.ControlPlaneLoadBalancer.LoadBalancerType == infrav1.LoadBalancerTypeDisabled { + clusterScope.Debug("load balancer reconciliation shifted to external provider, checking external endpoint") + + return r.checkForExternalControlPlaneLoadBalancer(clusterScope, awsCluster), nil + } + + elbService := r.getELBService(clusterScope) + + if err := elbService.ReconcileLoadbalancers(); err != nil { + clusterScope.Error(err, "failed to reconcile load balancer") + conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error()) + return nil, err + } + + if awsCluster.Status.Network.APIServerELB.DNSName == "" { + conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "") + clusterScope.Info("Waiting on API server ELB DNS name") + return &retryAfterDuration, nil + } + + clusterScope.Debug("looking up IP address for DNS", "dns", awsCluster.Status.Network.APIServerELB.DNSName) + if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil { + clusterScope.Error(err, "failed to get IP address for dns name", "dns", awsCluster.Status.Network.APIServerELB.DNSName) + conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameResolveReason, clusterv1.ConditionSeverityInfo, "") + clusterScope.Info("Waiting on API server ELB DNS name to resolve") + return &retryAfterDuration, nil + } + conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + + awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ + Host: awsCluster.Status.Network.APIServerELB.DNSName, + Port: clusterScope.APIServerPort(), + } + + return nil, nil +} + func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) (reconcile.Result, error) { clusterScope.Info("Reconciling AWSCluster") @@ -280,7 +319,6 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) } ec2Service := r.getEC2Service(clusterScope) - elbService := r.getELBService(clusterScope) networkSvc := r.getNetworkService(*clusterScope) sgService := r.getSecurityGroupService(*clusterScope) s3Service := s3.NewService(clusterScope) @@ -310,10 +348,10 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope 
*scope.ClusterScope) }
 	}
 
-	if err := elbService.ReconcileLoadbalancers(); err != nil {
-		clusterScope.Error(err, "failed to reconcile load balancer")
-		conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error())
+	if requeueAfter, err := r.reconcileLoadBalancer(clusterScope, awsCluster); err != nil {
 		return reconcile.Result{}, err
+	} else if requeueAfter != nil {
+		return reconcile.Result{RequeueAfter: *requeueAfter}, err
 	}
 
 	if err := s3Service.ReconcileBucket(); err != nil {
@@ -321,26 +359,6 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)
 		return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
 	}
 
-	if awsCluster.Status.Network.APIServerELB.DNSName == "" {
-		conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "")
-		clusterScope.Info("Waiting on API server ELB DNS name")
-		return reconcile.Result{RequeueAfter: 15 * time.Second}, nil
-	}
-
-	clusterScope.Debug("looking up IP address for DNS", "dns", awsCluster.Status.Network.APIServerELB.DNSName)
-	if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil {
-		clusterScope.Error(err, "failed to get IP address for dns name", "dns", awsCluster.Status.Network.APIServerELB.DNSName)
-		conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameResolveReason, clusterv1.ConditionSeverityInfo, "")
-		clusterScope.Info("Waiting on API server ELB DNS name to resolve")
-		return reconcile.Result{RequeueAfter: 15 * time.Second}, nil
-	}
-	conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
-
-	awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
-		Host: awsCluster.Status.Network.APIServerELB.DNSName,
-		Port: clusterScope.APIServerPort(),
-	}
-
 	for _, subnet := range clusterScope.Subnets().FilterPrivate() {
 		found := false
 		for _, az := range awsCluster.Status.Network.APIServerELB.AvailabilityZones {
@@ -447,3 +465,29 @@ func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(_ context.Con
 		}
 	}
 }
+
+func (r *AWSClusterReconciler) checkForExternalControlPlaneLoadBalancer(clusterScope *scope.ClusterScope, awsCluster *infrav1.AWSCluster) *time.Duration {
+	requeueAfterPeriod := 15 * time.Second
+
+	switch {
+	case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0 && awsCluster.Spec.ControlPlaneEndpoint.Port == 0:
+		clusterScope.Info("AWSCluster control plane endpoint is not populated yet")
+		conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "")
+
+		return &requeueAfterPeriod
+	case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0:
+		clusterScope.Info("AWSCluster control plane endpoint host is not populated yet")
+		conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "")
+
+		return &requeueAfterPeriod
+	case awsCluster.Spec.ControlPlaneEndpoint.Port == 0:
+		clusterScope.Info("AWSCluster control plane endpoint port is not populated yet")
+		conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "")
+
+		return &requeueAfterPeriod
+	default:
+		conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition)
+
+		return nil
+	}
+}
diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go
index b50b61c2d7..d97e18d4db 100644
--- a/controllers/awscluster_controller_test.go
+++ b/controllers/awscluster_controller_test.go
@@ -29,6 +29,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/client-go/util/retry"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
@@ -65,7 +66,118 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) {
 	teardown := func() {
 		mockCtrl.Finish()
 	}
+	t.Run("Should wait for external Control Plane endpoint when LoadBalancer is disabled, and eventually succeed when patched", func(t *testing.T) {
+		g := NewWithT(t)
+		mockCtrl = gomock.NewController(t)
+		ec2Mock := mocks.NewMockEC2API(mockCtrl)
+		expect := func(m *mocks.MockEC2APIMockRecorder) {
+			// First iteration: the AWS cluster is still missing a valid control plane endpoint.
+			mockedCreateVPCCalls(m)
+			mockedCreateSGCalls(false, m)
+			mockedDescribeInstanceCall(m)
+			// Second iteration: the AWS cluster object has been patched,
+			// so a valid control plane endpoint is now available.
+			mockedCreateVPCCalls(m)
+			mockedCreateSGCalls(false, m)
+			mockedDescribeInstanceCall(m)
+		}
+		expect(ec2Mock.EXPECT())
+
+		setup(t)
+		controllerIdentity := createControllerIdentity(g)
+		ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
+		g.Expect(err).To(BeNil())
+		// Create the AWS cluster with a disabled load balancer: with no ALB,
+		// ELB, or NLB specified, the cluster must consistently report that it
+		// is waiting for the control plane endpoint.
+ awsCluster := getAWSCluster("test", ns.Name) + awsCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{ + LoadBalancerType: infrav1.LoadBalancerTypeDisabled, + } + + g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed()) + + defer teardown() + defer t.Cleanup(func() { + g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed()) + }) + + cs, err := getClusterScope(awsCluster) + g.Expect(err).To(BeNil()) + networkSvc := network.NewService(cs) + networkSvc.EC2Client = ec2Mock + reconciler.networkServiceFactory = func(clusterScope scope.ClusterScope) services.NetworkInterface { + return networkSvc + } + + ec2Svc := ec2Service.NewService(cs) + ec2Svc.EC2Client = ec2Mock + reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface { + return ec2Svc + } + testSecurityGroupRoles := []infrav1.SecurityGroupRole{ + infrav1.SecurityGroupBastion, + infrav1.SecurityGroupAPIServerLB, + infrav1.SecurityGroupLB, + infrav1.SecurityGroupControlPlane, + infrav1.SecurityGroupNode, + } + sgSvc := securitygroup.NewService(cs, testSecurityGroupRoles) + sgSvc.EC2Client = ec2Mock + + reconciler.securityGroupFactory = func(clusterScope scope.ClusterScope) services.SecurityGroupInterface { + return sgSvc + } + cs.SetSubnets([]infrav1.SubnetSpec{ + { + ID: "subnet-2", + AvailabilityZone: "us-east-1c", + IsPublic: true, + CidrBlock: "10.0.11.0/24", + }, + { + ID: "subnet-1", + AvailabilityZone: "us-east-1a", + CidrBlock: "10.0.10.0/24", + IsPublic: false, + }, + }) + + _, err = reconciler.reconcileNormal(cs) + g.Expect(err).To(BeNil()) + + cluster := &infrav1.AWSCluster{} + g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: cs.AWSCluster.Name, Namespace: cs.AWSCluster.Namespace}, cluster)).ToNot(HaveOccurred()) + g.Expect(cluster.Spec.ControlPlaneEndpoint.Host).To(BeEmpty()) + g.Expect(cluster.Spec.ControlPlaneEndpoint.Port).To(BeZero()) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ + {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1.ConditionSeverityInfo, reason: infrav1.WaitForExternalControlPlaneEndpointReason}, + }) + // Mimicking an external operator patching the cluster with an already provisioned Load Balancer: + // this could be done by a human who provisioned a LB, or by a Control Plane provider. + g.Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + if err = testEnv.Get(ctx, client.ObjectKey{Name: cs.AWSCluster.Name, Namespace: cs.AWSCluster.Namespace}, cs.AWSCluster); err != nil { + return err + } + + cs.AWSCluster.Spec.ControlPlaneEndpoint.Host = "10.0.10.1" + cs.AWSCluster.Spec.ControlPlaneEndpoint.Port = 6443 + return testEnv.Update(ctx, cs.AWSCluster) + })).To(Succeed()) + // Executing back a second reconciliation: + // the AWS Cluster should be ready with no LoadBalancer false condition. 
+ _, err = reconciler.reconcileNormal(cs) + g.Expect(err).To(BeNil()) + g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ + {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + }) + }) t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC", func(t *testing.T) { g := NewWithT(t) mockCtrl = gomock.NewController(t) diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index 01122cad0e..733d6ce9e9 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -30,6 +30,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -417,6 +418,7 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s InfrastructureReady: true, }, }, + ControlPlane: &unstructured.Unstructured{}, Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test", diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index 38aa8bdb44..57af003cfe 100644 --- a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -33,6 +33,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -52,6 +53,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" ) @@ -129,6 +131,7 @@ func TestAWSMachineReconciler(t *testing.T) { }, }, InfraCluster: cs, + ControlPlane: &unstructured.Unstructured{}, AWSMachine: awsMachine, }, ) @@ -157,6 +160,7 @@ func TestAWSMachineReconciler(t *testing.T) { InfrastructureReady: true, }, }, + ControlPlane: &unstructured.Unstructured{}, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", @@ -390,7 +394,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStatePending))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed"))) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) @@ -410,7 +414,7 @@ func TestAWSMachineReconciler(t *testing.T) { 
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateRunning))) g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue()) - g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed"))) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{ {conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionTrue}, }) @@ -431,7 +435,7 @@ func TestAWSMachineReconciler(t *testing.T) { secretSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return("test", int32(1), nil).Times(1) _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state is undefined"))) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state is undefined")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("InstanceUnhandledState"))) g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"NewAWSMachineState\" is undefined"))) expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionUnknown}}) @@ -572,7 +576,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopping))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed"))) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) }) @@ -588,7 +592,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopped))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed"))) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) }) @@ -604,7 +608,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateRunning))) g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue()) - g.Expect(buf.String()).To(ContainSubstring(("EC2 instance state changed"))) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) }) }) t.Run("deleting the AWSMachine manually", func(t *testing.T) { @@ -629,7 +633,7 @@ func TestAWSMachineReconciler(t *testing.T) { instance.State = infrav1.InstanceStateShuttingDown _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring(("Unexpected EC2 instance termination"))) + g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination"))) }) @@ -644,7 +648,7 @@ func TestAWSMachineReconciler(t 
*testing.T) { instance.State = infrav1.InstanceStateTerminated _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring(("Unexpected EC2 instance termination"))) + g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination"))) g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"terminated\" is unexpected"))) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceTerminatedReason}}) @@ -2440,6 +2444,10 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi ns := "testns" + cp := &kubeadmv1beta1.KubeadmControlPlane{} + cp.SetName("capi-cp-test-1") + cp.SetNamespace(ns) + ownerCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: ns}, Spec: clusterv1.ClusterSpec{ @@ -2449,6 +2457,12 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi Namespace: ns, APIVersion: infrav1.GroupVersion.String(), }, + ControlPlaneRef: &corev1.ObjectReference{ + Kind: "KubeadmControlPlane", + Namespace: cp.Namespace, + Name: cp.Name, + APIVersion: kubeadmv1beta1.GroupVersion.String(), + }, }, Status: clusterv1.ClusterStatus{ InfrastructureReady: true, @@ -2568,7 +2582,7 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi }, } - fakeClient := fake.NewClientBuilder().WithObjects(ownerCluster, awsCluster, ownerMachine, awsMachine, controllerIdentity, secret).WithStatusSubresource(awsCluster, awsMachine).Build() + fakeClient := fake.NewClientBuilder().WithObjects(ownerCluster, awsCluster, ownerMachine, awsMachine, controllerIdentity, secret, cp).WithStatusSubresource(awsCluster, awsMachine).Build() recorder := record.NewFakeRecorder(10) reconciler := &AWSMachineReconciler{ diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 4adf3e779d..98f392a7b1 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -29,6 +29,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" ) var ( @@ -45,6 +46,7 @@ func TestMain(m *testing.M) { func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(kubeadmv1beta1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go index f34790d061..9cad370f35 100644 --- a/pkg/cloud/scope/machine_test.go +++ b/pkg/cloud/scope/machine_test.go @@ -22,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -132,7 +133,8 @@ func setupMachineScope() (*MachineScope, error) { InfraCluster: &ClusterScope{ AWSCluster: awsCluster, }, - AWSMachine: awsMachine, + ControlPlane: &unstructured.Unstructured{}, + AWSMachine: awsMachine, }, ) } @@ -223,9 +225,10 @@ func 
TestGetRawBootstrapDataWithFormat(t *testing.T) { machineScope, err := NewMachineScope( MachineScopeParams{ - Client: client, - Machine: machine, - Cluster: cluster, + Client: client, + Machine: machine, + Cluster: cluster, + ControlPlane: &unstructured.Unstructured{}, InfraCluster: &ClusterScope{ AWSCluster: awsCluster, }, diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go index f68e4a5f5e..9ccf5a67ba 100644 --- a/pkg/cloud/services/ec2/instances_test.go +++ b/pkg/cloud/services/ec2/instances_test.go @@ -31,6 +31,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -4035,6 +4036,7 @@ func TestCreateInstance(t *testing.T) { machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: client, Cluster: cluster, + ControlPlane: &unstructured.Unstructured{}, Machine: machine, AWSMachine: awsMachine, InfraCluster: clusterScope, diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go index 87cf7e958a..df4976ea4e 100644 --- a/pkg/cloud/services/secretsmanager/secret_test.go +++ b/pkg/cloud/services/secretsmanager/secret_test.go @@ -26,6 +26,7 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -280,6 +281,7 @@ func getClusterScope(client client.Client) (*scope.ClusterScope, error) { func getMachineScope(client client.Client, clusterScope *scope.ClusterScope) (*scope.MachineScope, error) { return scope.NewMachineScope(scope.MachineScopeParams{ Client: client, + ControlPlane: &unstructured.Unstructured{}, Cluster: clusterScope.Cluster, Machine: &clusterv1.Machine{}, InfraCluster: clusterScope, diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go index 04afa9e1d4..4e82494848 100644 --- a/pkg/cloud/services/ssm/secret_test.go +++ b/pkg/cloud/services/ssm/secret_test.go @@ -28,6 +28,7 @@ import ( "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -273,6 +274,7 @@ func getClusterScope(client client.Client) (*scope.ClusterScope, error) { func getMachineScope(client client.Client, clusterScope *scope.ClusterScope) (*scope.MachineScope, error) { return scope.NewMachineScope(scope.MachineScopeParams{ Client: client, + ControlPlane: &unstructured.Unstructured{}, Cluster: clusterScope.Cluster, Machine: &clusterv1.Machine{}, InfraCluster: clusterScope, From fc3cb074e4e9e82964da6951ab438c63b3defd8a Mon Sep 17 00:00:00 2001 From: Dario Tranchitella Date: Mon, 2 Oct 2023 18:35:10 +0200 Subject: [PATCH 3/3] chore(log): capitalising debug message Signed-off-by: Dario Tranchitella --- controllers/awscluster_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index a6cfc81aed..13db38000a 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -288,7 +288,7 @@ func (r *AWSClusterReconciler) reconcileLoadBalancer(clusterScope *scope.Cluster return &retryAfterDuration, nil } - clusterScope.Debug("looking up IP address for DNS", "dns", awsCluster.Status.Network.APIServerELB.DNSName) + clusterScope.Debug("Looking up IP address for DNS", "dns", awsCluster.Status.Network.APIServerELB.DNSName) if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil { clusterScope.Error(err, "failed to get IP address for dns name", "dns", awsCluster.Status.Network.APIServerELB.DNSName) conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameResolveReason, clusterv1.ConditionSeverityInfo, "")