140 changes: 92 additions & 48 deletions controllers/openstackcluster_controller.go
@@ -130,9 +130,13 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
scope.Logger.Info("Reconciling Cluster delete")

if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
deleted, err := deleteBastion(scope, cluster, openStackCluster)
if err != nil {
return reconcile.Result{}, err
}
if !deleted {
return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil
}

networkingService, err := networking.NewService(scope)
if err != nil {
@@ -189,54 +193,51 @@ func contains(arr []string, target string) bool {
return false
}

func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) {
computeService, err := compute.NewService(scope)
if err != nil {
return err
return false, err
}
networkingService, err := networking.NewService(scope)
if err != nil {
return err
return false, err
}

instanceName := fmt.Sprintf("%s-bastion", cluster.Name)
instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, instanceName)
if err != nil {
return err
return false, err
}

if instanceStatus != nil {
instanceNS, err := instanceStatus.NetworkStatus()
if err != nil {
return err
return false, err
}
addresses := instanceNS.Addresses()

for _, address := range addresses {
if address.Type == corev1.NodeExternalIP {
if err = networkingService.DeleteFloatingIP(openStackCluster, address.Address); err != nil {
handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete floating IP: %v", err))
return errors.Errorf("failed to delete floating IP: %v", err)
return false, errors.Errorf("failed to delete floating IP: %v", err)
}
}
}
}

machineSpec := &openStackCluster.Spec.Bastion.Instance
if err = computeService.DeleteInstance(openStackCluster, machineSpec, instanceName, instanceStatus); err != nil {
handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion: %v", err))
return errors.Errorf("failed to delete bastion: %v", err)
instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name)
if err = computeService.DeleteInstance(openStackCluster, instanceSpec, instanceStatus); err != nil {
return false, errors.Errorf("failed to delete bastion: %v", err)
}

openStackCluster.Status.Bastion = nil

if err = networkingService.DeleteBastionSecurityGroup(openStackCluster, fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)); err != nil {
handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion security group: %v", err))
return errors.Errorf("failed to delete bastion security group: %v", err)
return false, errors.Errorf("failed to delete bastion security group: %v", err)
}
openStackCluster.Status.BastionSecurityGroup = nil

return nil
return true, nil

Contributor
Looks like deleted is as simple as if err == nil? I guess you just want to be explicit?

Contributor Author
Currently yes, but for the same reason as updating reconcile instance I want to be able to return an incomplete result. i.e. There was no error, but delete is not finished yet.

}
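
The review thread above asks whether deleted is just err == nil; the author's reply is that the boolean leaves room for a "no error, but not finished yet" outcome, which the caller turns into a requeue rather than a failure. Below is a minimal sketch of that calling pattern, assuming the controller-runtime Result type and using illustrative function names rather than the PR's code:

package example

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

const defaultOpenStackBackOff = 15 * time.Second

// deleteResource is a stand-in for deleteBastion: done == false with a
// nil error means the delete was issued but has not finished yet, which
// is distinct from a failure.
func deleteResource() (done bool, err error) {
	// ... issue the delete and check whether the resource is gone ...
	return false, nil
}

func reconcileDeleteExample() (ctrl.Result, error) {
	done, err := deleteResource()
	if err != nil {
		// A real failure is surfaced as an error.
		return ctrl.Result{}, err
	}
	if !done {
		// No error, but not complete yet: back off and requeue.
		return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil
	}
	return ctrl.Result{}, nil
}

This is the same shape reconcileDelete and reconcileNormal use in this PR when they requeue with defaultOpenStackBackOff.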

func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
@@ -259,10 +260,6 @@ func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch
return reconcile.Result{}, err
}

if err = reconcileBastion(scope, cluster, openStackCluster); err != nil {
return reconcile.Result{}, err
}

availabilityZones, err := computeService.GetAvailabilityZones()
if err != nil {
return ctrl.Result{}, err
@@ -288,14 +285,30 @@ func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch
}
}

openStackCluster.Status.Ready = true
openStackCluster.Status.FailureMessage = nil
openStackCluster.Status.FailureReason = nil
if !openStackCluster.Status.Ready {
openStackCluster.Status.Ready = true

// If we're setting Ready, return early to update status and
// allow dependent operations to proceed. Ensure we call
// reconcile again to create the bastion.
return reconcile.Result{Requeue: true}, nil
}

Member
That's a great move :-)

reconciled, err := reconcileBastion(scope, cluster, openStackCluster)
if err != nil {
return reconcile.Result{}, err
}
if !reconciled {
return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil
}

scope.Logger.Info("Reconciled Cluster create successfully")
return reconcile.Result{}, nil
}
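
The "That's a great move :-)" comment above refers to flipping Status.Ready and immediately returning Requeue: true, so the status update is persisted (by the deferred patch helper in the real controller) before the bastion work runs on a later pass. A hedged sketch of that shape, with illustrative names only:

package example

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

type clusterStatus struct {
	Ready bool
}

// reconcileReadyExample sets Ready and requeues right away; whatever
// persists status (a deferred patch in the real controller) writes the
// change back, so consumers waiting on Ready can proceed while the
// remaining work happens on the next reconcile.
func reconcileReadyExample(status *clusterStatus) (ctrl.Result, error) {
	if !status.Ready {
		status.Ready = true
		return ctrl.Result{Requeue: true}, nil
	}

	// Later passes do the rest, e.g. create the bastion.
	return ctrl.Result{}, nil
}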

func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) {
scope.Logger.Info("Reconciling Bastion")

if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled {
@@ -304,56 +317,87 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC

computeService, err := compute.NewService(scope)
if err != nil {
return err
return false, err
}

instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, fmt.Sprintf("%s-bastion", cluster.Name))
instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name)
instanceStatus, err := computeService.ReconcileInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name)
if err != nil {
return err
return false, errors.Errorf("failed to reconcile bastion: %v", err)
}
if instanceStatus != nil {
bastion, err := instanceStatus.APIInstance(openStackCluster)
if err != nil {
return err
}
openStackCluster.Status.Bastion = bastion
return nil
if instanceStatus == nil {
// Bastion is not ready yet
return false, nil
}

instanceStatus, err = computeService.CreateBastion(openStackCluster, cluster.Name)
// We overwrite the bastion status with the status of the instance from
// OpenStack. However, we don't want to lose any previously created
// floating IP which hasn't been associated yet, so we keep a reference
// to it here.
var floatingIP string
if openStackCluster.Status.Bastion != nil {
floatingIP = openStackCluster.Status.Bastion.FloatingIP
}

bastion, err := instanceStatus.APIInstance(openStackCluster)
if err != nil {
return errors.Errorf("failed to reconcile bastion: %v", err)
return false, err
}
openStackCluster.Status.Bastion = bastion

// Bastion already has a floating IP
if bastion.FloatingIP != "" {
return true, nil
}

networkingService, err := networking.NewService(scope)
if err != nil {
return err
return false, err
}

clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)
fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, openStackCluster.Spec.Bastion.Instance.FloatingIP)
fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, floatingIP)
if err != nil {
handleUpdateOSCError(openStackCluster, errors.Errorf("failed to get or create floating IP for bastion: %v", err))
return errors.Errorf("failed to get or create floating IP for bastion: %v", err)
return false, errors.Errorf("failed to get or create floating IP for bastion: %v", err)
}
bastion.FloatingIP = fp.FloatingIP

port, err := computeService.GetManagementPort(openStackCluster, instanceStatus)
if err != nil {
err = errors.Errorf("getting management port for bastion: %v", err)
handleUpdateOSCError(openStackCluster, err)
return err
return false, errors.Errorf("getting management port for bastion: %v", err)
}

err = networkingService.AssociateFloatingIP(openStackCluster, fp, port.ID)
if err != nil {
handleUpdateOSCError(openStackCluster, errors.Errorf("failed to associate floating IP with bastion: %v", err))
return errors.Errorf("failed to associate floating IP with bastion: %v", err)
return false, errors.Errorf("failed to associate floating IP with bastion: %v", err)
}

bastion, err := instanceStatus.APIInstance(openStackCluster)
if err != nil {
return err
return true, nil
}

func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, clusterName string) *compute.InstanceSpec {
name := fmt.Sprintf("%s-bastion", clusterName)
instanceSpec := &compute.InstanceSpec{
Name: name,
Flavor: openStackCluster.Spec.Bastion.Instance.Flavor,
SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName,
Image: openStackCluster.Spec.Bastion.Instance.Image,
ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID,
FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone,
RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume,
}
bastion.FloatingIP = fp.FloatingIP
openStackCluster.Status.Bastion = bastion
return nil

instanceSpec.SecurityGroups = openStackCluster.Spec.Bastion.Instance.SecurityGroups
if openStackCluster.Spec.ManagedSecurityGroups {
instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{
UUID: openStackCluster.Status.BastionSecurityGroup.ID,
})
}

instanceSpec.Networks = openStackCluster.Spec.Bastion.Instance.Networks
instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports

return instanceSpec
}

func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
102 changes: 83 additions & 19 deletions controllers/openstackmachine_controller.go
@@ -63,8 +63,8 @@ type OpenStackMachineReconciler struct {
}

const (
waitForClusterInfrastructureReadyDuration = 15 * time.Second
waitForInstanceBecomeActiveToReconcile = 60 * time.Second
defaultOpenStackBackOff = 15 * time.Second
waitForInstanceBecomeActiveToReconcile = 60 * time.Second
)

// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackmachines,verbs=get;list;watch;create;update;patch;delete
@@ -248,7 +248,14 @@ func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, scope
}
}

if err := computeService.DeleteInstance(openStackMachine, &openStackMachine.Spec, openStackMachine.Name, instanceStatus); err != nil {
instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, "")
if err != nil {
err = errors.Errorf("machine spec is invalid: %v", err)
handleUpdateMachineError(scope.Logger, openStackMachine, err)
return ctrl.Result{}, err
}

if err := computeService.DeleteInstance(openStackMachine, instanceSpec, instanceStatus); err != nil {
handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("error deleting OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err))
return ctrl.Result{}, nil
}
@@ -277,7 +284,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope

if !cluster.Status.InfrastructureReady {
scope.Logger.Info("Cluster infrastructure is not ready yet, requeuing machine")
return ctrl.Result{RequeueAfter: waitForClusterInfrastructureReadyDuration}, nil
return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil
}

// Make sure bootstrap data is available and populated.
@@ -303,16 +310,21 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
return ctrl.Result{}, err
}

instanceStatus, err := r.getOrCreate(scope.Logger, cluster, openStackCluster, machine, openStackMachine, computeService, userData)
instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
if err != nil {
handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("OpenStack instance cannot be created: %v", err))
err = errors.Errorf("machine spec is invalid: %v", err)
handleUpdateMachineError(scope.Logger, openStackMachine, err)
return ctrl.Result{}, err
}

// Set an error message if we couldn't find the instance.
instanceStatus, err := computeService.ReconcileInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name)
if err != nil {
err = errors.Errorf("openstack instance cannot be created: %v", err)
handleUpdateMachineError(scope.Logger, openStackMachine, err)
return ctrl.Result{}, err
}
if instanceStatus == nil {
handleUpdateMachineError(scope.Logger, openStackMachine, errors.New("OpenStack instance cannot be found"))
return ctrl.Result{}, nil
return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil
}

// TODO(sbueringer) From CAPA: TODO(ncdc): move this validation logic into a validating webhook (for us: create validation logic in webhook)
@@ -384,21 +396,73 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
return ctrl.Result{}, nil
}

func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) {
instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name)
if err != nil {
return nil, err
func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, userData string) (*compute.InstanceSpec, error) {
if openStackMachine == nil {
return nil, fmt.Errorf("create options need to be specified to create instance")
}

if instanceStatus == nil {
logger.Info("Machine not exist, Creating Machine", "Machine", openStackMachine.Name)
instanceStatus, err = computeService.CreateInstance(openStackCluster, machine, openStackMachine, cluster.Name, userData)
if err != nil {
return nil, errors.Errorf("error creating Openstack instance: %v", err)
if machine.Spec.FailureDomain == nil {
return nil, fmt.Errorf("failure domain not set")
}

instanceSpec := compute.InstanceSpec{
Name: openStackMachine.Name,
Image: openStackMachine.Spec.Image,
ImageUUID: openStackMachine.Spec.ImageUUID,
Flavor: openStackMachine.Spec.Flavor,
SSHKeyName: openStackMachine.Spec.SSHKeyName,
UserData: userData,
Metadata: openStackMachine.Spec.ServerMetadata,
ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive,
FailureDomain: *machine.Spec.FailureDomain,
RootVolume: openStackMachine.Spec.RootVolume,
Subnet: openStackMachine.Spec.Subnet,
ServerGroupID: openStackMachine.Spec.ServerGroupID,
Trunk: openStackMachine.Spec.Trunk,
}

machineTags := []string{}

// Append machine specific tags
machineTags = append(machineTags, openStackMachine.Spec.Tags...)

// Append cluster scope tags
machineTags = append(machineTags, openStackCluster.Spec.Tags...)

// tags need to be unique or the "apply tags" call will fail.
deduplicate := func(tags []string) []string {
seen := make(map[string]struct{}, len(machineTags))
unique := make([]string, 0, len(machineTags))
for _, tag := range tags {
if _, ok := seen[tag]; !ok {
seen[tag] = struct{}{}
unique = append(unique, tag)
}
}
return unique
}
machineTags = deduplicate(machineTags)

instanceSpec.Tags = machineTags

instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups
if openStackCluster.Spec.ManagedSecurityGroups {
var managedSecurityGroup string
if util.IsControlPlaneMachine(machine) {
managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID
} else {
managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID
}

instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{
UUID: managedSecurityGroup,
})
}

instanceSpec.Networks = openStackMachine.Spec.Networks
instanceSpec.Ports = openStackMachine.Spec.Ports

return instanceStatus, nil
return &instanceSpec, nil
}

func handleUpdateMachineError(logger logr.Logger, openstackMachine *infrav1.OpenStackMachine, message error) {