✨WIP: Refactor CreateInstance and CreateBastion #1153
Changes from all commits: 7d9f21a, 4c07893, 6f61121, 5a82b92, 34cbf11
```diff
@@ -130,9 +130,13 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
 func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
 	scope.Logger.Info("Reconciling Cluster delete")

-	if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
+	deleted, err := deleteBastion(scope, cluster, openStackCluster)
+	if err != nil {
 		return reconcile.Result{}, err
 	}
+	if !deleted {
+		return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil
+	}

 	networkingService, err := networking.NewService(scope)
 	if err != nil {
```
```diff
@@ -189,54 +193,51 @@ func contains(arr []string, target string) bool {
 	return false
 }

-func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
+func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) {
 	computeService, err := compute.NewService(scope)
 	if err != nil {
-		return err
+		return false, err
 	}
 	networkingService, err := networking.NewService(scope)
 	if err != nil {
-		return err
+		return false, err
 	}

 	instanceName := fmt.Sprintf("%s-bastion", cluster.Name)
 	instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, instanceName)
 	if err != nil {
-		return err
+		return false, err
 	}

 	if instanceStatus != nil {
 		instanceNS, err := instanceStatus.NetworkStatus()
 		if err != nil {
-			return err
+			return false, err
 		}
 		addresses := instanceNS.Addresses()

 		for _, address := range addresses {
 			if address.Type == corev1.NodeExternalIP {
 				if err = networkingService.DeleteFloatingIP(openStackCluster, address.Address); err != nil {
 					handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete floating IP: %v", err))
-					return errors.Errorf("failed to delete floating IP: %v", err)
+					return false, errors.Errorf("failed to delete floating IP: %v", err)
 				}
 			}
 		}
 	}

-	machineSpec := &openStackCluster.Spec.Bastion.Instance
-	if err = computeService.DeleteInstance(openStackCluster, machineSpec, instanceName, instanceStatus); err != nil {
-		handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion: %v", err))
-		return errors.Errorf("failed to delete bastion: %v", err)
+	instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name)
+	if err = computeService.DeleteInstance(openStackCluster, instanceSpec, instanceStatus); err != nil {
+		return false, errors.Errorf("failed to delete bastion: %v", err)
 	}

 	openStackCluster.Status.Bastion = nil

 	if err = networkingService.DeleteBastionSecurityGroup(openStackCluster, fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)); err != nil {
 		handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete bastion security group: %v", err))
-		return errors.Errorf("failed to delete bastion security group: %v", err)
+		return false, errors.Errorf("failed to delete bastion security group: %v", err)
 	}
 	openStackCluster.Status.BastionSecurityGroup = nil

-	return nil
+	return true, nil
 }

 func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
```
Contributor: Looks like

Contributor (author): Currently yes, but for the same reason as updating reconcile instance I want to be able to return an incomplete result, i.e. there was no error, but the delete is not finished yet.
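The distinction the author is drawing — "failed" versus "not finished yet" — is the standard polling pattern in controller-runtime. A minimal standalone sketch of the contract (the concrete value of `defaultOpenStackBackOff` is defined elsewhere in the package and is assumed here; `deleteSomething` is a stand-in for `deleteBastion`):

```go
package example

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// Assumed value for illustration; the real constant lives elsewhere in the
// controller package.
const defaultOpenStackBackOff = 20 * time.Second

// deleteSomething models the new deleteBastion contract: the bool reports
// whether deletion has completed, independently of whether an error occurred.
func deleteSomething() (deleted bool, err error) {
	// ... issue the delete and check whether the resource is gone ...
	return false, nil
}

func reconcileDelete() (ctrl.Result, error) {
	deleted, err := deleteSomething()
	if err != nil {
		// A real failure: controller-runtime retries with its own
		// exponential backoff.
		return ctrl.Result{}, err
	}
	if !deleted {
		// No error, but not done: poll again after a fixed delay instead
		// of treating the pending delete as a failure.
		return ctrl.Result{RequeueAfter: defaultOpenStackBackOff}, nil
	}
	// Deletion finished; no requeue needed.
	return ctrl.Result{}, nil
}
```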
```diff
@@ -259,10 +260,6 @@ func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch
 		return reconcile.Result{}, err
 	}

-	if err = reconcileBastion(scope, cluster, openStackCluster); err != nil {
-		return reconcile.Result{}, err
-	}
-
 	availabilityZones, err := computeService.GetAvailabilityZones()
 	if err != nil {
 		return ctrl.Result{}, err
```
```diff
@@ -288,14 +285,30 @@ func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch
 		}
 	}

-	openStackCluster.Status.Ready = true
-	openStackCluster.Status.FailureMessage = nil
-	openStackCluster.Status.FailureReason = nil
+	if !openStackCluster.Status.Ready {
+		openStackCluster.Status.Ready = true
+
+		// If we're setting Ready, return early to update status and
+		// allow dependent operations to proceed. Ensure we call
+		// reconcile again to create the bastion.
+		return reconcile.Result{Requeue: true}, nil
+	}
+
+	reconciled, err := reconcileBastion(scope, cluster, openStackCluster)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+	if !reconciled {
+		return reconcile.Result{RequeueAfter: defaultOpenStackBackOff}, nil
+	}

 	scope.Logger.Info("Reconciled Cluster create successfully")
 	return reconcile.Result{}, nil
 }

-func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
+func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (bool, error) {
 	scope.Logger.Info("Reconciling Bastion")

 	if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled {
```
Member: That's a great move :-)
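The early return works because of a convention not visible in this hunk: Cluster API providers patch the reconciled object in a deferred call inside `Reconcile`, so every return path flushes status to the API server first. A condensed sketch of that pattern, assuming the conventional `patch` helper from `sigs.k8s.io/cluster-api/util/patch` and `kerrors` from `k8s.io/apimachinery/pkg/util/errors` (illustrative, not this provider's exact `Reconcile`):

```go
func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
	openStackCluster := &infrav1.OpenStackCluster{}
	if err := r.Client.Get(ctx, req.NamespacedName, openStackCluster); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	patchHelper, err := patch.NewHelper(openStackCluster, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}
	defer func() {
		// Runs on every return path: the Status.Ready=true set inside
		// reconcileNormal is written to the API server here, before the
		// Requeue:true result triggers the next pass that creates the
		// bastion.
		if err := patchHelper.Patch(ctx, openStackCluster); err != nil {
			reterr = kerrors.NewAggregate([]error{reterr, err})
		}
	}()

	// ... dispatch to reconcileDelete / reconcileNormal elided ...
	return ctrl.Result{}, nil
}
```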
```diff
@@ -304,56 +317,87 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC
 	computeService, err := compute.NewService(scope)
 	if err != nil {
-		return err
+		return false, err
 	}

-	instanceStatus, err := computeService.GetInstanceStatusByName(openStackCluster, fmt.Sprintf("%s-bastion", cluster.Name))
+	instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name)
+	instanceStatus, err := computeService.ReconcileInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name)
 	if err != nil {
-		return err
+		return false, errors.Errorf("failed to reconcile bastion: %v", err)
 	}
-	if instanceStatus != nil {
-		bastion, err := instanceStatus.APIInstance(openStackCluster)
-		if err != nil {
-			return err
-		}
-		openStackCluster.Status.Bastion = bastion
-		return nil
+	if instanceStatus == nil {
+		// Bastion is not ready yet
+		return false, nil
 	}

-	instanceStatus, err = computeService.CreateBastion(openStackCluster, cluster.Name)
-	if err != nil {
-		return errors.Errorf("failed to reconcile bastion: %v", err)
+	// We overwrite the bastion status with the status of the instance from
+	// OpenStack. However, we don't want to lose any previously created
+	// floating IP which hasn't been associated yet, so we keep a reference
+	// to it here.
+	var floatingIP string
+	if openStackCluster.Status.Bastion != nil {
+		floatingIP = openStackCluster.Status.Bastion.FloatingIP
+	}
+
+	bastion, err := instanceStatus.APIInstance(openStackCluster)
+	if err != nil {
+		return false, err
 	}
+	openStackCluster.Status.Bastion = bastion
+
+	// Bastion already has a floating IP
+	if bastion.FloatingIP != "" {
+		return true, nil
+	}

 	networkingService, err := networking.NewService(scope)
 	if err != nil {
-		return err
+		return false, err
 	}

 	clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)
-	fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, openStackCluster.Spec.Bastion.Instance.FloatingIP)
+	fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, floatingIP)
 	if err != nil {
 		handleUpdateOSCError(openStackCluster, errors.Errorf("failed to get or create floating IP for bastion: %v", err))
-		return errors.Errorf("failed to get or create floating IP for bastion: %v", err)
+		return false, errors.Errorf("failed to get or create floating IP for bastion: %v", err)
 	}
+	bastion.FloatingIP = fp.FloatingIP

 	port, err := computeService.GetManagementPort(openStackCluster, instanceStatus)
 	if err != nil {
-		err = errors.Errorf("getting management port for bastion: %v", err)
-		handleUpdateOSCError(openStackCluster, err)
-		return err
+		return false, errors.Errorf("getting management port for bastion: %v", err)
 	}

 	err = networkingService.AssociateFloatingIP(openStackCluster, fp, port.ID)
 	if err != nil {
 		handleUpdateOSCError(openStackCluster, errors.Errorf("failed to associate floating IP with bastion: %v", err))
-		return errors.Errorf("failed to associate floating IP with bastion: %v", err)
+		return false, errors.Errorf("failed to associate floating IP with bastion: %v", err)
 	}

-	bastion, err := instanceStatus.APIInstance(openStackCluster)
-	if err != nil {
-		return err
-	}
-	bastion.FloatingIP = fp.FloatingIP
-	openStackCluster.Status.Bastion = bastion
-	return nil
+	return true, nil
+}
+
+func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, clusterName string) *compute.InstanceSpec {
+	name := fmt.Sprintf("%s-bastion", clusterName)
+	instanceSpec := &compute.InstanceSpec{
+		Name:          name,
+		Flavor:        openStackCluster.Spec.Bastion.Instance.Flavor,
+		SSHKeyName:    openStackCluster.Spec.Bastion.Instance.SSHKeyName,
+		Image:         openStackCluster.Spec.Bastion.Instance.Image,
+		ImageUUID:     openStackCluster.Spec.Bastion.Instance.ImageUUID,
+		FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone,
+		RootVolume:    openStackCluster.Spec.Bastion.Instance.RootVolume,
+	}
+
+	instanceSpec.SecurityGroups = openStackCluster.Spec.Bastion.Instance.SecurityGroups
+	if openStackCluster.Spec.ManagedSecurityGroups {
+		instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupParam{
+			UUID: openStackCluster.Status.BastionSecurityGroup.ID,
+		})
+	}
+
+	instanceSpec.Networks = openStackCluster.Spec.Bastion.Instance.Networks
+	instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports
+
+	return instanceSpec
 }

 func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
```
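With `bastionToInstanceSpec` as the single place the bastion's server definition is derived (both `reconcileBastion` and `deleteBastion` now build the spec from it), the conversion is easy to pin down in a unit test. A hypothetical sketch — the test values are invented, and the `infrav1` type shapes are assumed from their use in the diff above:

```go
// Hypothetical test, placed in the same package as bastionToInstanceSpec so
// the infrav1 and compute imports already in that file apply; only "testing"
// is needed in addition.
func TestBastionToInstanceSpec(t *testing.T) {
	openStackCluster := &infrav1.OpenStackCluster{
		Spec: infrav1.OpenStackClusterSpec{
			Bastion: &infrav1.Bastion{
				Enabled:  true,
				Instance: infrav1.OpenStackMachineSpec{Flavor: "m1.small"},
			},
			ManagedSecurityGroups: true,
		},
		Status: infrav1.OpenStackClusterStatus{
			BastionSecurityGroup: &infrav1.SecurityGroup{ID: "sg-1234"},
		},
	}

	spec := bastionToInstanceSpec(openStackCluster, "test-cluster")

	// The name must match what deleteBastion looks up via fmt.Sprintf("%s-bastion", ...).
	if spec.Name != "test-cluster-bastion" {
		t.Errorf("Name = %q, want %q", spec.Name, "test-cluster-bastion")
	}
	// With managed security groups enabled, the bastion group is appended.
	if len(spec.SecurityGroups) != 1 || spec.SecurityGroups[0].UUID != "sg-1234" {
		t.Errorf("SecurityGroups = %+v, want the managed bastion group sg-1234", spec.SecurityGroups)
	}
}
```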