diff --git a/PROJECT b/PROJECT index 9db67d8145..26589c0275 100644 --- a/PROJECT +++ b/PROJECT @@ -44,4 +44,7 @@ resources: - group: infrastructure kind: OpenStackFloatingIPPool version: v1alpha1 +- group: infrastructure + kind: OpenStackServer + version: v1alpha1 version: "2" diff --git a/api/v1alpha1/conditions_consts.go b/api/v1alpha1/conditions_consts.go index e40c728ff8..018e580ad2 100644 --- a/api/v1alpha1/conditions_consts.go +++ b/api/v1alpha1/conditions_consts.go @@ -16,6 +16,8 @@ limitations under the License. package v1alpha1 +type ServerStatusError string + const ( // OpenstackFloatingIPPoolReadyCondition reports on the current status of the floating ip pool. Ready indicates that the pool is ready to be used. OpenstackFloatingIPPoolReadyCondition = "OpenstackFloatingIPPoolReadyCondition" @@ -25,4 +27,6 @@ const ( // UnableToFindFloatingIPNetworkReason is used when the floating ip network is not found. UnableToFindNetwork = "UnableToFindNetwork" + + CreateServerError ServerStatusError = "CreateError" ) diff --git a/api/v1alpha1/openstackserver_types.go b/api/v1alpha1/openstackserver_types.go new file mode 100644 index 0000000000..c82e27b0b8 --- /dev/null +++ b/api/v1alpha1/openstackserver_types.go @@ -0,0 +1,188 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/optional" +) + +const ( + // OpenStackServerFinalizer allows ReconcileOpenStackServer to clean up resources associated with OpenStackServer before + // removing it from the apiserver. + OpenStackServerFinalizer = "openstackserver.infrastructure.cluster.x-k8s.io" +) + +// OpenStackServerSpec defines the desired state of OpenStackServer. +type OpenStackServerSpec struct { + // AdditionalBlockDevices is a list of specifications for additional block devices to attach to the server instance. + // +listType=map + // +listMapKey=name + // +optional + AdditionalBlockDevices []infrav1.AdditionalBlockDevice `json:"additionalBlockDevices,omitempty"` + + // AvailabilityZone is the availability zone in which to create the server instance. + //+optional + AvailabilityZone optional.String `json:"availabilityZone,omitempty"` + + // ConfigDrive is a flag to enable config drive for the server instance. + // +optional + ConfigDrive optional.Bool `json:"configDrive,omitempty"` + + // The flavor reference for the flavor for the server instance. + // +required + Flavor string `json:"flavor"` + + // FloatingIPPoolRef is a reference to a FloatingIPPool to allocate a floating IP from. + // +optional + FloatingIPPoolRef *corev1.TypedLocalObjectReference `json:"floatingIPPoolRef,omitempty"` + + // IdentityRef is a reference to a secret holding OpenStack credentials. + // +required + IdentityRef infrav1.OpenStackIdentityReference `json:"identityRef"` + + // The image to use for the server instance. + // +required + Image infrav1.ImageParam `json:"image"` + + // Ports to be attached to the server instance. 
+ // +required + Ports []infrav1.PortOpts `json:"ports"` + + // RootVolume is the specification for the root volume of the server instance. + // +optional + RootVolume *infrav1.RootVolume `json:"rootVolume,omitempty"` + + // SSHKeyName is the name of the SSH key to inject in the instance. + // +required + SSHKeyName string `json:"sshKeyName"` + + // SecurityGroups is a list of security groups names to assign to the instance. + // +optional + SecurityGroups []infrav1.SecurityGroupParam `json:"securityGroups,omitempty"` + + // ServerGroup is the server group to which the server instance belongs. + // +optional + ServerGroup *infrav1.ServerGroupParam `json:"serverGroup,omitempty"` + + // ServerMetadata is a map of key value pairs to add to the server instance. + // +listType=map + // +listMapKey=key + // +optional + ServerMetadata []infrav1.ServerMetadata `json:"serverMetadata,omitempty"` + + // Tags which will be added to the machine and all dependent resources + // which support them. These are in addition to Tags defined on the + // cluster. + // Requires Nova api 2.52 minimum! + // +listType=set + Tags []string `json:"tags,omitempty"` + + // Trunk is a flag to indicate if the server instance is created on a trunk port or not. + // +optional + Trunk optional.Bool `json:"trunk,omitempty"` + + // UserDataRef is a reference to a secret containing the user data to + // be injected into the server instance. + // +optional + UserDataRef *corev1.LocalObjectReference `json:"userDataRef,omitempty"` +} + +// OpenStackServerStatus defines the observed state of OpenStackServer. +type OpenStackServerStatus struct { + // Ready is true when the OpenStack server is ready. + // +kubebuilder:default=false + Ready bool `json:"ready"` + + // InstanceID is the ID of the server instance. + // +optional + InstanceID optional.String `json:"instanceID,omitempty"` + + // InstanceState is the state of the server instance. 
+ // +optional + InstanceState *infrav1.InstanceState `json:"instanceState,omitempty"` + + // Addresses is the list of addresses of the server instance. + // +optional + Addresses []corev1.NodeAddress `json:"addresses,omitempty"` + + // Resolved contains parts of the machine spec with all external + // references fully resolved. + // +optional + Resolved *ResolvedServerSpec `json:"resolved,omitempty"` + + // Resources contains references to OpenStack resources created for the machine. + // +optional + Resources *ServerResources `json:"resources,omitempty"` + + // Conditions defines current service state of the OpenStackServer. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:path=openstackservers,scope=Namespaced,categories=cluster-api,shortName=oss +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="InstanceState",type="string",JSONPath=".status.instanceState",description="OpenStack instance state" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="OpenStack instance ready status" +// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".status.instanceID",description="OpenStack instance ID" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of OpenStack instance" + +// OpenStackServer is the Schema for the openstackservers API. +type OpenStackServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OpenStackServerSpec `json:"spec,omitempty"` + Status OpenStackServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OpenStackServerList contains a list of OpenStackServer. 
+type OpenStackServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OpenStackServer `json:"items"` +} + +// GetConditions returns the observations of the operational state of the OpenStackServer resource. +func (r *OpenStackServer) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the OpenStackServer to the prescribed clusterv1.Conditions. +func (r *OpenStackServer) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} + +var _ infrav1.IdentityRefProvider = &OpenStackServer{} + +// GetIdentityRef returns the Server's namespace and IdentityRef. +func (r *OpenStackServer) GetIdentityRef() (*string, *infrav1.OpenStackIdentityReference) { + return &r.Namespace, &r.Spec.IdentityRef +} + +func init() { + SchemeBuilder.Register(&OpenStackServer{}, &OpenStackServerList{}) +} diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go new file mode 100644 index 0000000000..2f339adaae --- /dev/null +++ b/api/v1alpha1/types.go @@ -0,0 +1,43 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" +) + +// ResolvedServerSpec contains resolved references to resources required by the server. 
+type ResolvedServerSpec struct { + // ServerGroupID is the ID of the server group the server should be added to and is calculated based on ServerGroupFilter. + // +optional + ServerGroupID string `json:"serverGroupID,omitempty"` + + // ImageID is the ID of the image to use for the server and is calculated based on ImageFilter. + // +optional + ImageID string `json:"imageID,omitempty"` + + // Ports is the fully resolved list of ports to create for the server. + // +optional + Ports []infrav1.ResolvedPortSpec `json:"ports,omitempty"` +} + +// ServerResources contains references to OpenStack resources created for the server. +type ServerResources struct { + // Ports is the status of the ports created for the server. + // +optional + Ports []infrav1.PortStatus `json:"ports,omitempty"` +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 1dc4b7d3d1..55a7485a71 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -157,3 +158,234 @@ func (in *OpenStackFloatingIPPoolStatus) DeepCopy() *OpenStackFloatingIPPoolStat in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackServer) DeepCopyInto(out *OpenStackServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServer. 
+func (in *OpenStackServer) DeepCopy() *OpenStackServer { + if in == nil { + return nil + } + out := new(OpenStackServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackServerList) DeepCopyInto(out *OpenStackServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerList. +func (in *OpenStackServerList) DeepCopy() *OpenStackServerList { + if in == nil { + return nil + } + out := new(OpenStackServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackServerSpec) DeepCopyInto(out *OpenStackServerSpec) { + *out = *in + if in.AdditionalBlockDevices != nil { + in, out := &in.AdditionalBlockDevices, &out.AdditionalBlockDevices + *out = make([]v1beta1.AdditionalBlockDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.ConfigDrive != nil { + in, out := &in.ConfigDrive, &out.ConfigDrive + *out = new(bool) + **out = **in + } + if in.FloatingIPPoolRef != nil { + in, out := &in.FloatingIPPoolRef, &out.FloatingIPPoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + out.IdentityRef = in.IdentityRef + in.Image.DeepCopyInto(&out.Image) + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1beta1.PortOpts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(v1beta1.RootVolume) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]v1beta1.SecurityGroupParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerGroup != nil { + in, out := &in.ServerGroup, &out.ServerGroup + *out = new(v1beta1.ServerGroupParam) + (*in).DeepCopyInto(*out) + } + if in.ServerMetadata != nil { + in, out := &in.ServerMetadata, &out.ServerMetadata + *out = make([]v1beta1.ServerMetadata, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Trunk != nil { + in, out := &in.Trunk, &out.Trunk + *out = new(bool) + **out = **in + } + if in.UserDataRef != nil { + in, out := &in.UserDataRef, &out.UserDataRef + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new OpenStackServerSpec. +func (in *OpenStackServerSpec) DeepCopy() *OpenStackServerSpec { + if in == nil { + return nil + } + out := new(OpenStackServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackServerStatus) DeepCopyInto(out *OpenStackServerStatus) { + *out = *in + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(v1beta1.InstanceState) + **out = **in + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1.NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Resolved != nil { + in, out := &in.Resolved, &out.Resolved + *out = new(ResolvedServerSpec) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ServerResources) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(apiv1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerStatus. +func (in *OpenStackServerStatus) DeepCopy() *OpenStackServerStatus { + if in == nil { + return nil + } + out := new(OpenStackServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResolvedServerSpec) DeepCopyInto(out *ResolvedServerSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1beta1.ResolvedPortSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolvedServerSpec. +func (in *ResolvedServerSpec) DeepCopy() *ResolvedServerSpec { + if in == nil { + return nil + } + out := new(ResolvedServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerResources) DeepCopyInto(out *ServerResources) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1beta1.PortStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerResources. +func (in *ServerResources) DeepCopy() *ServerResources { + if in == nil { + return nil + } + out := new(ServerResources) + in.DeepCopyInto(out) + return out +} diff --git a/api_violations.report b/api_violations.report index 3482cb6c0e..52c6766e82 100644 --- a/api_violations.report +++ b/api_violations.report @@ -97,6 +97,11 @@ API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackFloatingIPPoolStatus,AvailableIPs API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackFloatingIPPoolStatus,ClaimedIPs API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackFloatingIPPoolStatus,FailedIPs +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackServerSpec,Ports +API rule violation: 
list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackServerSpec,SecurityGroups +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackServerStatus,Addresses +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,ResolvedServerSpec,Ports +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,ServerResources,Ports API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6,APIServerLoadBalancer,AdditionalPorts API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6,APIServerLoadBalancer,AllowedCIDRs API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6,Instance,Networks diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml new file mode 100644 index 0000000000..c0a3a4c625 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml @@ -0,0 +1,1128 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: openstackservers.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: OpenStackServer + listKind: OpenStackServerList + plural: openstackservers + shortNames: + - oss + singular: openstackserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: OpenStack instance state + jsonPath: .status.instanceState + name: InstanceState + type: string + - description: OpenStack instance ready status + jsonPath: .status.ready + name: Ready + type: string + - description: OpenStack instance ID + jsonPath: .status.instanceID + name: InstanceID + type: string + - description: Time 
duration since creation of OpenStack instance + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: OpenStackServer is the Schema for the openstackservers API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpenStackServerSpec defines the desired state of OpenStackServer. + properties: + additionalBlockDevices: + description: AdditionalBlockDevices is a list of specifications for + additional block devices to attach to the server instance. + items: + description: AdditionalBlockDevice is a block device to attach to + the server. + properties: + name: + description: |- + Name of the block device in the context of a machine. + If the block device is a volume, the Cinder volume will be named + as a combination of the machine name and this name. + Also, this name will be used for tagging the block device. + Information about the block device tag can be obtained from the OpenStack + metadata API or the config drive. + Name cannot be 'root', which is reserved for the root volume. + type: string + sizeGiB: + description: SizeGiB is the size of the block device in gibibytes + (GiB). 
+ minimum: 1 + type: integer + storage: + description: |- + Storage specifies the storage type of the block device and + additional storage options. + properties: + type: + description: |- + Type is the type of block device to create. + This can be either "Volume" or "Local". + type: string + volume: + description: Volume contains additional storage options + for a volume block device. + properties: + availabilityZone: + description: |- + AvailabilityZone is the volume availability zone to create the volume + in. If not specified, the volume will be created without an explicit + availability zone. + properties: + from: + default: Name + description: |- + From specifies where we will obtain the availability zone for the + volume. The options are "Name" and "Machine". If "Name" is specified + then the Name field must also be specified. If "Machine" is specified + the volume will use the value of FailureDomain, if any, from the + associated Machine. + enum: + - Name + - Machine + type: string + name: + description: |- + Name is the name of a volume availability zone to use. It is required + if From is "Name". The volume availability zone name may not contain + spaces. + minLength: 1 + pattern: ^[^ ]+$ + type: string + type: object + x-kubernetes-validations: + - message: name is required when from is 'Name' or default + rule: '!has(self.from) || self.from == ''Name'' ? + has(self.name) : !has(self.name)' + type: + description: |- + Type is the Cinder volume type of the volume. + If omitted, the default Cinder volume type that is configured in the OpenStack cloud + will be used. + type: string + type: object + required: + - type + type: object + required: + - name + - sizeGiB + - storage + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + availabilityZone: + description: AvailabilityZone is the availability zone in which to + create the server instance. 
+ type: string + configDrive: + description: ConfigDrive is a flag to enable config drive for the + server instance. + type: boolean + flavor: + description: The flavor reference for the flavor for the server instance. + type: string + floatingIPPoolRef: + description: FloatingIPPoolRef is a reference to a FloatingIPPool + to allocate a floating IP from. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + identityRef: + description: IdentityRef is a reference to a secret holding OpenStack + credentials. + properties: + cloudName: + description: CloudName specifies the name of the entry in the + clouds.yaml file to use. + type: string + name: + description: |- + Name is the name of a secret in the same namespace as the resource being provisioned. + The secret must contain a key named `clouds.yaml` which contains an OpenStack clouds.yaml file. + The secret may optionally contain a key named `cacert` containing a PEM-encoded CA certificate. + type: string + required: + - cloudName + - name + type: object + image: + description: The image to use for the server instance. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: |- + Filter describes a query for an image. If specified, the combination + of name and tags must return a single matching image or an error will + be raised. + minProperties: 1 + properties: + name: + description: The name of the desired image. If specified, + the combination of name and tags must return a single matching + image or an error will be raised. 
+ type: string + tags: + description: The tags associated with the desired image. If + specified, the combination of name and tags must return + a single matching image or an error will be raised. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the uuid of the image. ID will not be validated + before use. + format: uuid + type: string + type: object + ports: + description: Ports to be attached to the server instance. + items: + properties: + adminStateUp: + description: AdminStateUp specifies whether the port should + be created in the up (true) or down (false) state. The default + is up. + type: boolean + allowedAddressPairs: + description: |- + AllowedAddressPairs is a list of address pairs which Neutron will + allow the port to send traffic from in addition to the port's + addresses. If not specified, the MAC Address will be the MAC Address + of the port. Depending on the configuration of Neutron, it may be + supported to specify a CIDR instead of a specific IP address. + items: + properties: + ipAddress: + description: |- + IPAddress is the IP address of the allowed address pair. Depending on + the configuration of Neutron, it may be supported to specify a CIDR + instead of a specific IP address. + type: string + macAddress: + description: |- + MACAddress is the MAC address of the allowed address pair. If not + specified, the MAC address will be the MAC address of the port. + type: string + required: + - ipAddress + type: object + type: array + description: + description: Description is a human-readable description for + the port. + type: string + disablePortSecurity: + description: |- + DisablePortSecurity enables or disables the port security when set. + When not set, it takes the value of the corresponding field at the network level. + type: boolean + fixedIPs: + description: FixedIPs is a list of pairs of subnet and/or IP + address to assign to the port. 
If specified, these must be + subnets of the port's network. + items: + properties: + ipAddress: + description: |- + IPAddress is a specific IP address to assign to the port. If Subnet + is also specified, IPAddress must be a valid IP address in the + subnet. If Subnet is not specified, IPAddress must be a valid IP + address in any subnet of the port's network. + type: string + subnet: + description: |- + Subnet is an openstack subnet query that will return the id of a subnet to create + the fixed IP of a port in. This query must not return more than one subnet. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a filter to select the + subnet. It must match exactly one subnet. + minProperties: 1 + properties: + cidr: + type: string + description: + type: string + gatewayIP: + type: string + ipVersion: + type: integer + ipv6AddressMode: + type: string + ipv6RAMode: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. 
+ items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the uuid of the subnet. It will + not be validated. + format: uuid + type: string + type: object + type: object + type: array + x-kubernetes-list-type: atomic + hostID: + description: HostID specifies the ID of the host where the port + resides. + type: string + macAddress: + description: MACAddress specifies the MAC address of the port. + If not specified, the MAC address will be generated. + type: string + nameSuffix: + description: NameSuffix will be appended to the name of the + port if specified. If unspecified, instead the 0-based index + of the port in the list is used. + type: string + network: + description: |- + Network is a query for an openstack network that the port will be created or discovered on. + This will fail if the query returns more than one network. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a filter to select an OpenStack + network. If provided, cannot be empty. + minProperties: 1 + properties: + description: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. 
+ minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the ID of the network to use. If ID is + provided, the other filters cannot be provided. Must be + in UUID format. + format: uuid + type: string + type: object + profile: + description: |- + Profile is a set of key-value pairs that are used for binding + details. We intentionally don't expose this as a map[string]string + because we only want to enable the users to set the values of the + keys that are known to work in OpenStack Networking API. 
See + https://docs.openstack.org/api-ref/network/v2/index.html?expanded=create-port-detail#create-port + To set profiles, your tenant needs permissions rule:create_port, and + rule:create_port:binding:profile + properties: + ovsHWOffload: + description: |- + OVSHWOffload enables or disables the OVS hardware offload feature. + This flag is not required on OpenStack clouds since Yoga as Nova will set it automatically when the port is attached. + See: https://bugs.launchpad.net/nova/+bug/2020813 + type: boolean + trustedVF: + description: TrustedVF enables or disables the “trusted + mode” for the VF. + type: boolean + type: object + propagateUplinkStatus: + description: PropageteUplinkStatus enables or disables the propagate + uplink status on the port. + type: boolean + securityGroups: + description: SecurityGroups is a list of the names, uuids, filters + or any combination these of the security groups to assign + to the instance. + items: + description: SecurityGroupParam specifies an OpenStack security + group. It may be specified by ID or filter, but not both. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a query to select an OpenStack + security group. If provided, cannot be empty. + minProperties: 1 + properties: + description: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. 
+ It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the ID of the security group to use. + If ID is provided, the other filters cannot be provided. + Must be in UUID format. + format: uuid + type: string + type: object + type: array + x-kubernetes-list-type: atomic + tags: + description: |- + Tags applied to the port (and corresponding trunk, if a trunk is configured.) + These tags are applied in addition to the instance's tags, which will also be applied to the port. + items: + type: string + type: array + x-kubernetes-list-type: set + trunk: + description: |- + Trunk specifies whether trunking is enabled at the port level. If not + provided the value is inherited from the machine, or false for a + bastion host. + type: boolean + valueSpecs: + description: |- + Value specs are extra parameters to include in the API request with OpenStack. + This is an extension point for the API, so what they do and if they are supported, + depends on the specific OpenStack implementation. 
+ items: + description: ValueSpec represents a single value_spec key-value + pair. + properties: + key: + description: Key is the key in the key-value pair. + type: string + name: + description: |- + Name is the name of the key-value pair. + This is just for identifying the pair and will not be sent to the OpenStack API. + type: string + value: + description: Value is the value in the key-value pair. + type: string + required: + - key + - name + - value + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vnicType: + description: |- + VNICType specifies the type of vNIC which this port should be + attached to. This is used to determine which mechanism driver(s) to + be used to bind the port. The valid values are normal, macvtap, + direct, baremetal, direct-physical, virtio-forwarder, smart-nic and + remote-managed, although these values will not be validated in this + API to ensure compatibility with future neutron changes or custom + implementations. What type of vNIC is actually available depends on + deployments. If not specified, the Neutron default value is used. + type: string + type: object + type: array + rootVolume: + description: RootVolume is the specification for the root volume of + the server instance. + properties: + availabilityZone: + description: |- + AvailabilityZone is the volume availability zone to create the volume + in. If not specified, the volume will be created without an explicit + availability zone. + properties: + from: + default: Name + description: |- + From specifies where we will obtain the availability zone for the + volume. The options are "Name" and "Machine". If "Name" is specified + then the Name field must also be specified. If "Machine" is specified + the volume will use the value of FailureDomain, if any, from the + associated Machine. + enum: + - Name + - Machine + type: string + name: + description: |- + Name is the name of a volume availability zone to use. 
It is required + if From is "Name". The volume availability zone name may not contain + spaces. + minLength: 1 + pattern: ^[^ ]+$ + type: string + type: object + x-kubernetes-validations: + - message: name is required when from is 'Name' or default + rule: '!has(self.from) || self.from == ''Name'' ? has(self.name) + : !has(self.name)' + sizeGiB: + description: SizeGiB is the size of the block device in gibibytes + (GiB). + minimum: 1 + type: integer + type: + description: |- + Type is the Cinder volume type of the volume. + If omitted, the default Cinder volume type that is configured in the OpenStack cloud + will be used. + type: string + required: + - sizeGiB + type: object + securityGroups: + description: SecurityGroups is a list of security groups names to + assign to the instance. + items: + description: SecurityGroupParam specifies an OpenStack security + group. It may be specified by ID or filter, but not both. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a query to select an OpenStack + security group. If provided, cannot be empty. + minProperties: 1 + properties: + description: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. 
+ minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the ID of the security group to use. If ID + is provided, the other filters cannot be provided. Must be + in UUID format. + format: uuid + type: string + type: object + type: array + serverGroup: + description: ServerGroup is the server group to which the server instance + belongs. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a query to select an OpenStack server + group. If provided, it cannot be empty. + minProperties: 1 + properties: + name: + description: Name is the name of a server group to look for. + type: string + type: object + id: + description: ID is the ID of the server group to use. + format: uuid + type: string + type: object + serverMetadata: + description: ServerMetadata is a map of key value pairs to add to + the server instance. 
+ items: + properties: + key: + description: Key is the server metadata key + maxLength: 255 + type: string + value: + description: Value is the server metadata value + maxLength: 255 + type: string + required: + - key + - value + type: object + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + sshKeyName: + description: SSHKeyName is the name of the SSH key to inject in the + instance. + type: string + tags: + description: |- + Tags which will be added to the machine and all dependent resources + which support them. These are in addition to Tags defined on the + cluster. + Requires Nova api 2.52 minimum! + items: + type: string + type: array + x-kubernetes-list-type: set + trunk: + description: Trunk is a flag to indicate if the server instance is + created on a trunk port or not. + type: boolean + userDataRef: + description: |- + UserDataRef is a reference to a secret containing the user data to + be injected into the server instance. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - flavor + - identityRef + - image + - ports + - sshKeyName + type: object + status: + description: OpenStackServerStatus defines the observed state of OpenStackServer. + properties: + addresses: + description: Addresses is the list of addresses of the server instance. + items: + description: NodeAddress contains information for the node's address. + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP + or InternalIP. + type: string + required: + - address + - type + type: object + type: array + conditions: + description: Conditions defines current service state of the OpenStackServer. 
+ items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + A human readable message indicating details about the transition. + This field may be empty. + type: string + reason: + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. + type: string + severity: + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + instanceID: + description: InstanceID is the ID of the server instance. + type: string + instanceState: + description: InstanceState is the state of the server instance. + type: string + ready: + default: false + description: Ready is true when the OpenStack server is ready. + type: boolean + resolved: + description: |- + Resolved contains parts of the machine spec with all external + references fully resolved. 
+ properties: + imageID: + description: ImageID is the ID of the image to use for the server + and is calculated based on ImageFilter. + type: string + ports: + description: Ports is the fully resolved list of ports to create + for the server. + items: + description: ResolvedPortSpec is a PortOpts with all contained + references fully resolved. + properties: + adminStateUp: + description: AdminStateUp specifies whether the port should + be created in the up (true) or down (false) state. The + default is up. + type: boolean + allowedAddressPairs: + description: |- + AllowedAddressPairs is a list of address pairs which Neutron will + allow the port to send traffic from in addition to the port's + addresses. If not specified, the MAC Address will be the MAC Address + of the port. Depending on the configuration of Neutron, it may be + supported to specify a CIDR instead of a specific IP address. + items: + properties: + ipAddress: + description: |- + IPAddress is the IP address of the allowed address pair. Depending on + the configuration of Neutron, it may be supported to specify a CIDR + instead of a specific IP address. + type: string + macAddress: + description: |- + MACAddress is the MAC address of the allowed address pair. If not + specified, the MAC address will be the MAC address of the port. + type: string + required: + - ipAddress + type: object + type: array + description: + description: Description is a human-readable description + for the port. + type: string + disablePortSecurity: + description: |- + DisablePortSecurity enables or disables the port security when set. + When not set, it takes the value of the corresponding field at the network level. + type: boolean + fixedIPs: + description: FixedIPs is a list of pairs of subnet and/or + IP address to assign to the port. If specified, these + must be subnets of the port's network. + items: + description: ResolvedFixedIP is a FixedIP with the Subnet + resolved to an ID. 
+ properties: + ipAddress: + description: |- + IPAddress is a specific IP address to assign to the port. If SubnetID + is also specified, IPAddress must be a valid IP address in the + subnet. If Subnet is not specified, IPAddress must be a valid IP + address in any subnet of the port's network. + type: string + subnet: + description: SubnetID is the id of a subnet to create + the fixed IP of a port in. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + hostID: + description: HostID specifies the ID of the host where the + port resides. + type: string + macAddress: + description: MACAddress specifies the MAC address of the + port. If not specified, the MAC address will be generated. + type: string + name: + description: Name is the name of the port. + type: string + networkID: + description: NetworkID is the ID of the network the port + will be created in. + type: string + profile: + description: |- + Profile is a set of key-value pairs that are used for binding + details. We intentionally don't expose this as a map[string]string + because we only want to enable the users to set the values of the + keys that are known to work in OpenStack Networking API. See + https://docs.openstack.org/api-ref/network/v2/index.html?expanded=create-port-detail#create-port + To set profiles, your tenant needs permissions rule:create_port, and + rule:create_port:binding:profile + properties: + ovsHWOffload: + description: |- + OVSHWOffload enables or disables the OVS hardware offload feature. + This flag is not required on OpenStack clouds since Yoga as Nova will set it automatically when the port is attached. + See: https://bugs.launchpad.net/nova/+bug/2020813 + type: boolean + trustedVF: + description: TrustedVF enables or disables the “trusted + mode” for the VF. + type: boolean + type: object + propagateUplinkStatus: + description: PropageteUplinkStatus enables or disables the + propagate uplink status on the port. 
+ type: boolean + securityGroups: + description: SecurityGroups is a list of security group + IDs to assign to the port. + items: + type: string + type: array + x-kubernetes-list-type: atomic + tags: + description: Tags applied to the port (and corresponding + trunk, if a trunk is configured.) + items: + type: string + type: array + x-kubernetes-list-type: set + trunk: + description: Trunk specifies whether trunking is enabled + at the port level. + type: boolean + valueSpecs: + description: |- + Value specs are extra parameters to include in the API request with OpenStack. + This is an extension point for the API, so what they do and if they are supported, + depends on the specific OpenStack implementation. + items: + description: ValueSpec represents a single value_spec + key-value pair. + properties: + key: + description: Key is the key in the key-value pair. + type: string + name: + description: |- + Name is the name of the key-value pair. + This is just for identifying the pair and will not be sent to the OpenStack API. + type: string + value: + description: Value is the value in the key-value pair. + type: string + required: + - key + - name + - value + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vnicType: + description: |- + VNICType specifies the type of vNIC which this port should be + attached to. This is used to determine which mechanism driver(s) to + be used to bind the port. The valid values are normal, macvtap, + direct, baremetal, direct-physical, virtio-forwarder, smart-nic and + remote-managed, although these values will not be validated in this + API to ensure compatibility with future neutron changes or custom + implementations. What type of vNIC is actually available depends on + deployments. If not specified, the Neutron default value is used. 
+ type: string + required: + - description + - name + - networkID + type: object + type: array + serverGroupID: + description: ServerGroupID is the ID of the server group the server + should be added to and is calculated based on ServerGroupFilter. + type: string + type: object + resources: + description: Resources contains references to OpenStack resources + created for the machine. + properties: + ports: + description: Ports is the status of the ports created for the + server. + items: + properties: + id: + description: ID is the unique identifier of the port. + type: string + required: + - id + type: object + type: array + type: object + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index a4c9e050cf..23697e7f31 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml - bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml - bases/infrastructure.cluster.x-k8s.io_openstackfloatingippools.yaml +- bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 4790f87801..7a31f4c2e0 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -113,6 +113,26 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackservers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackservers/status + verbs: + - get + - patch + - update - apiGroups: - ipam.cluster.x-k8s.io resources: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 032b3756be..d47c93d80a 100644 --- a/config/webhook/manifests.yaml +++ 
b/config/webhook/manifests.yaml @@ -88,3 +88,24 @@ webhooks: resources: - openstackmachinetemplates sideEffects: None +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-openstackserver + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.openstackserver.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - openstackservers + sideEffects: None diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index 22fe0b7ec0..9106d27721 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -26,7 +26,9 @@ import ( "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" @@ -45,6 +47,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer" @@ -56,7 +59,7 @@ import ( ) const ( - BastionInstanceHashAnnotation = "infrastructure.cluster.x-k8s.io/bastion-hash" + waitForBastionToReconcile = 15 * time.Second ) // OpenStackClusterReconciler reconciles a OpenStackCluster object. 
@@ -128,7 +131,7 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req } // Handle non-deleted clusters - return reconcileNormal(scope, cluster, openStackCluster) + return r.reconcileNormal(ctx, scope, cluster, openStackCluster) } func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { @@ -153,14 +156,20 @@ func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope // A bastion may have been created if cluster initialisation previously reached populating the network status // We attempt to delete it even if no status was written, just in case if openStackCluster.Status.Network != nil { - // Attempt to resolve bastion resources before delete. We don't need to worry about starting if the resources have changed on update. - if _, err := resolveBastionResources(scope, clusterResourceName, openStackCluster); err != nil { + if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil { return reconcile.Result{}, err } + } - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return reconcile.Result{}, err - } + // If a bastion server was found, we need to reconcile now until it's actually deleted. + // We don't want to remove the cluster finalizer until the associated OpenStackServer resource is deleted. 
+ bastionServer, err := r.getBastionServer(ctx, openStackCluster, cluster) + if client.IgnoreNotFound(err) != nil { + return reconcile.Result{}, err + } + if bastionServer != nil { + scope.Logger().Info("Waiting for the bastion OpenStackServer object to be deleted", "openStackServer", bastionServer.Name) + return ctrl.Result{Requeue: true}, nil } networkingService, err := networking.NewService(scope) @@ -218,46 +227,7 @@ func contains(arr []string, target string) bool { return false } -func resolveBastionResources(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster) (bool, error) { - // Resolve and store resources for the bastion - if openStackCluster.Spec.Bastion.IsEnabled() { - if openStackCluster.Status.Bastion == nil { - openStackCluster.Status.Bastion = &infrav1.BastionStatus{} - } - if openStackCluster.Spec.Bastion.Spec == nil { - return false, fmt.Errorf("bastion spec is nil when bastion is enabled, this shouldn't happen") - } - resolved := openStackCluster.Status.Bastion.Resolved - if resolved == nil { - resolved = &infrav1.ResolvedMachineSpec{} - openStackCluster.Status.Bastion.Resolved = resolved - } - changed, err := compute.ResolveMachineSpec(scope, - openStackCluster.Spec.Bastion.Spec, resolved, - clusterResourceName, bastionName(clusterResourceName), - openStackCluster, getBastionSecurityGroupID(openStackCluster)) - if err != nil { - return false, err - } - if changed { - // If the resolved machine spec changed we need to restart the reconcile to avoid inconsistencies between reconciles. 
- return true, nil - } - resources := openStackCluster.Status.Bastion.Resources - if resources == nil { - resources = &infrav1.MachineResources{} - openStackCluster.Status.Bastion.Resources = resources - } - - err = compute.AdoptMachineResources(scope, resolved, resources) - if err != nil { - return false, err - } - } - return false, nil -} - -func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { +func (r *OpenStackClusterReconciler) deleteBastion(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { scope.Logger().Info("Deleting Bastion") computeService, err := compute.NewService(scope) @@ -269,6 +239,11 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac return err } + bastionServer, err := r.getBastionServer(ctx, openStackCluster, cluster) + if client.IgnoreNotFound(err) != nil { + return err + } + if openStackCluster.Status.Bastion != nil && openStackCluster.Status.Bastion.FloatingIP != "" { if err = networkingService.DeleteFloatingIP(openStackCluster, openStackCluster.Status.Bastion.FloatingIP); err != nil { handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete floating IP: %w", err), false) @@ -279,28 +254,14 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac bastionStatus := openStackCluster.Status.Bastion var instanceStatus *compute.InstanceStatus - if bastionStatus != nil && bastionStatus.ID != "" { - instanceStatus, err = computeService.GetInstanceStatus(openStackCluster.Status.Bastion.ID) - if err != nil { - return err - } - } else { - instanceStatus, err = computeService.GetInstanceStatusByName(openStackCluster, bastionName(cluster.Name)) + if bastionStatus != nil && bastionServer != nil && bastionServer.Status.InstanceID != nil { + instanceStatus, err = computeService.GetInstanceStatus(*bastionServer.Status.InstanceID) if err != nil { 
return err } } - // If no instance was created we currently need to check for orphaned - // volumes. - if instanceStatus == nil { - bastion := openStackCluster.Spec.Bastion - if bastion != nil && bastion.Spec != nil { - if err := computeService.DeleteVolumes(bastionName(cluster.Name), bastion.Spec.RootVolume, bastion.Spec.AdditionalBlockDevices); err != nil { - return fmt.Errorf("delete volumes: %w", err) - } - } - } else { + if instanceStatus != nil { instanceNS, err := instanceStatus.NetworkStatus() if err != nil { return err @@ -316,36 +277,20 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac } } } - - if err = computeService.DeleteInstance(openStackCluster, instanceStatus); err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete bastion: %w", err), false) - return fmt.Errorf("failed to delete bastion: %w", err) - } } - if bastionStatus != nil && bastionStatus.Resources != nil { - trunkSupported, err := networkingService.IsTrunkExtSupported() - if err != nil { - return err - } - for _, port := range bastionStatus.Resources.Ports { - if err := networkingService.DeleteInstanceTrunkAndPort(openStackCluster, port, trunkSupported); err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete port: %w", err), false) - return fmt.Errorf("failed to delete port: %w", err) - } - } - bastionStatus.Resources.Ports = nil + if err := r.reconcileDeleteBastionServer(ctx, scope, openStackCluster, cluster); err != nil { + handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete bastion: %w", err), false) + return fmt.Errorf("failed to delete bastion: %w", err) } - scope.Logger().Info("Deleted Bastion") - openStackCluster.Status.Bastion = nil - delete(openStackCluster.ObjectMeta.Annotations, BastionInstanceHashAnnotation) + scope.Logger().Info("Deleted Bastion") return nil } -func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster 
*infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam +func (r *OpenStackClusterReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam scope.Logger().Info("Reconciling Cluster") // If the OpenStackCluster doesn't have our finalizer, add it. @@ -364,7 +309,9 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt return reconcile.Result{}, err } - result, err := reconcileBastion(scope, cluster, openStackCluster) + // TODO(emilien) we should do that separately but the reconcileBastion + // should happen after the cluster Ready is true + result, err := r.reconcileBastion(ctx, scope, cluster, openStackCluster) if err != nil { return reconcile.Result{}, err } @@ -399,32 +346,10 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt return reconcile.Result{}, nil } -func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) { +func (r *OpenStackClusterReconciler) reconcileBastion(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) { scope.Logger().V(4).Info("Reconciling Bastion") clusterResourceName := names.ClusterResourceName(cluster) - changed, err := resolveBastionResources(scope, clusterResourceName, openStackCluster) - if err != nil { - return nil, err - } - if changed { - return &reconcile.Result{}, nil - } - - // No Bastion defined - if !openStackCluster.Spec.Bastion.IsEnabled() { - // Delete any existing bastion - if openStackCluster.Status.Bastion != nil { - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return nil, err - } - // Reconcile again before continuing - return &reconcile.Result{}, nil - } - - // Otherwise nothing to do - return nil, nil - } computeService, err 
:= compute.NewService(scope) if err != nil { @@ -436,71 +361,28 @@ func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openS return nil, err } - instanceSpec, err := bastionToInstanceSpec(openStackCluster, cluster) - if err != nil { - return nil, err - } - - bastionHash, err := compute.HashInstanceSpec(instanceSpec) - if err != nil { - return nil, fmt.Errorf("failed computing bastion hash from instance spec: %w", err) + bastionServer, waitingForServer, err := r.reconcileBastionServer(ctx, scope, openStackCluster, cluster) + if err != nil || waitingForServer { + return &reconcile.Result{RequeueAfter: waitForBastionToReconcile}, err } - if bastionHashHasChanged(bastionHash, openStackCluster.ObjectMeta.Annotations) { - scope.Logger().Info("Bastion instance spec has changed, deleting existing bastion") - - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return nil, err - } - - // Add the new annotation and reconcile again before continuing - annotations.AddAnnotations(openStackCluster, map[string]string{BastionInstanceHashAnnotation: bastionHash}) - return &reconcile.Result{}, nil - } - - err = getOrCreateBastionPorts(openStackCluster, networkingService) - if err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create ports for bastion: %w", err), false) - return nil, fmt.Errorf("failed to get or create ports for bastion: %w", err) + if bastionServer == nil { + return nil, nil } - bastionPortIDs := GetPortIDs(openStackCluster.Status.Bastion.Resources.Ports) var instanceStatus *compute.InstanceStatus - if openStackCluster.Status.Bastion != nil && openStackCluster.Status.Bastion.ID != "" { - if instanceStatus, err = computeService.GetInstanceStatus(openStackCluster.Status.Bastion.ID); err != nil { - return nil, err - } - } - if instanceStatus == nil { - // Check if there is an existing instance with bastion name, in case where bastion ID would not have been properly stored in cluster status - 
if instanceStatus, err = computeService.GetInstanceStatusByName(openStackCluster, instanceSpec.Name); err != nil { + if bastionServer != nil && bastionServer.Status.InstanceID != nil { + if instanceStatus, err = computeService.GetInstanceStatus(*bastionServer.Status.InstanceID); err != nil { return nil, err } } if instanceStatus == nil { - instanceStatus, err = computeService.CreateInstance(openStackCluster, instanceSpec, bastionPortIDs) - if err != nil { - return nil, fmt.Errorf("failed to create bastion: %w", err) - } + // At this point we return an error if we don't have an instance status + return nil, fmt.Errorf("bastion instance status is nil") } // Save hash & status as soon as we know we have an instance instanceStatus.UpdateBastionStatus(openStackCluster) - // Make sure that bastion instance has a valid state - switch instanceStatus.State() { - case infrav1.InstanceStateError: - return nil, fmt.Errorf("failed to reconcile bastion, instance state is ERROR") - case infrav1.InstanceStateBuild, infrav1.InstanceStateUndefined: - scope.Logger().Info("Waiting for bastion instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) - return &reconcile.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil - case infrav1.InstanceStateDeleted: - // Not clear why this would happen, so try to clean everything up before reconciling again - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return nil, err - } - return &reconcile.Result{}, nil - } - port, err := computeService.GetManagementPort(openStackCluster, instanceStatus) if err != nil { err = fmt.Errorf("getting management port for bastion: %w", err) @@ -549,7 +431,123 @@ func bastionAddFloatingIP(openStackCluster *infrav1.OpenStackCluster, clusterRes return nil, nil } -func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*compute.InstanceSpec, error) { +// reconcileDeleteBastionServer reconciles the 
OpenStackServer object for the OpenStackCluster bastion. +// It returns nil if the OpenStackServer object is not found, otherwise it returns an error if any. +func (r *OpenStackClusterReconciler) reconcileDeleteBastionServer(ctx context.Context, scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) error { + scope.Logger().Info("Reconciling Bastion delete server") + server := &infrav1alpha1.OpenStackServer{} + err := r.Client.Get(ctx, client.ObjectKey{Namespace: openStackCluster.Namespace, Name: bastionName(cluster.Name)}, server) + if client.IgnoreNotFound(err) != nil { + return err + } + if apierrors.IsNotFound(err) { + return nil + } + + return r.Client.Delete(ctx, server) +} + +// reconcileBastionServer reconciles the OpenStackServer object for the OpenStackCluster bastion. +// It returns the OpenStackServer object, a boolean indicating if the reconciliation should continue +// and an error if any. +func (r *OpenStackClusterReconciler) reconcileBastionServer(ctx context.Context, scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, bool, error) { + server, err := r.getBastionServer(ctx, openStackCluster, cluster) + if client.IgnoreNotFound(err) != nil { + scope.Logger().Error(err, "Failed to get the bastion OpenStackServer object") + return nil, true, err + } + bastionNotFound := apierrors.IsNotFound(err) + + // If the bastion is not enabled, we don't need to create it and continue with the reconciliation. + if bastionNotFound && !openStackCluster.Spec.Bastion.IsEnabled() { + return nil, false, nil + } + + // If the bastion is found but is not enabled, we need to delete it and reconcile. 
+ if !bastionNotFound && !openStackCluster.Spec.Bastion.IsEnabled() { + scope.Logger().Info("Bastion is not enabled, deleting the OpenStackServer object") + if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil { + return nil, true, err + } + return nil, true, nil + } + + // If the bastion is found but the spec has changed, we need to delete it and reconcile. + bastionServerSpec := bastionToOpenStackServerSpec(openStackCluster) + if !bastionNotFound && server != nil && !apiequality.Semantic.DeepEqual(bastionServerSpec, &server.Spec) { + scope.Logger().Info("Bastion spec has changed, re-creating the OpenStackServer object") + if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil { + return nil, true, err + } + return nil, true, nil + } + + // If the bastion is not found, we need to create it. + if bastionNotFound { + scope.Logger().Info("Creating the bastion OpenStackServer object") + server, err = r.createBastionServer(ctx, openStackCluster, cluster) + if err != nil { + return nil, true, err + } + return server, true, nil + } + + // If the bastion server is not ready, we need to wait for it to be ready and reconcile. + if !server.Status.Ready { + scope.Logger().Info("Waiting for the bastion OpenStackServer to be ready") + return server, true, nil + } + + return server, false, nil +} + +// getBastionServer returns the OpenStackServer object for the bastion server. +// It returns the OpenStackServer object and an error if any. 
+func (r *OpenStackClusterReconciler) getBastionServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, error) { + bastionServer := &infrav1alpha1.OpenStackServer{} + bastionServerName := client.ObjectKey{ + Namespace: openStackCluster.Namespace, + Name: bastionName(cluster.Name), + } + err := r.Client.Get(ctx, bastionServerName, bastionServer) + if err != nil { + return nil, err + } + return bastionServer, nil +} + +// createBastionServer creates the OpenStackServer object for the bastion server. +// It returns the OpenStackServer object and an error if any. +func (r *OpenStackClusterReconciler) createBastionServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, error) { + bastionServerSpec := bastionToOpenStackServerSpec(openStackCluster) + bastionServer := &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + clusterv1.ClusterNameLabel: openStackCluster.Labels[clusterv1.ClusterNameLabel], + }, + Name: bastionName(cluster.Name), + Namespace: openStackCluster.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: openStackCluster.APIVersion, + Kind: openStackCluster.Kind, + Name: openStackCluster.Name, + UID: openStackCluster.UID, + }, + }, + }, + Spec: *bastionServerSpec, + } + + if err := r.Client.Create(ctx, bastionServer); err != nil { + return nil, fmt.Errorf("failed to create bastion server: %w", err) + } + return bastionServer, nil +} + +// bastionToOpenStackServerSpec converts the OpenStackMachineSpec for the bastion to an OpenStackServerSpec. +// It returns the OpenStackServerSpec and an error if any. 
+func bastionToOpenStackServerSpec(openStackCluster *infrav1.OpenStackCluster) *infrav1alpha1.OpenStackServerSpec { bastion := openStackCluster.Spec.Bastion if bastion == nil { bastion = &infrav1.Bastion{} @@ -559,25 +557,14 @@ func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, cluster * // v1beta1 API validations prevent this from happening in normal circumstances. bastion.Spec = &infrav1.OpenStackMachineSpec{} } - resolved := openStackCluster.Status.Bastion.Resolved - if resolved == nil { - return nil, errors.New("bastion resolved is nil") - } - machineSpec := bastion.Spec - instanceSpec := &compute.InstanceSpec{ - Name: bastionName(cluster.Name), - Flavor: machineSpec.Flavor, - SSHKeyName: machineSpec.SSHKeyName, - ImageID: resolved.ImageID, - RootVolume: machineSpec.RootVolume, - ServerGroupID: resolved.ServerGroupID, - Tags: compute.InstanceTags(machineSpec, openStackCluster), - } + az := "" if bastion.AvailabilityZone != nil { - instanceSpec.FailureDomain = *bastion.AvailabilityZone + az = *bastion.AvailabilityZone } - return instanceSpec, nil + openStackServerSpec := openStackMachineSpecToOpenStackServerSpec(bastion.Spec, openStackCluster.Spec.IdentityRef, compute.InstanceTags(bastion.Spec, openStackCluster), az, nil, getBastionSecurityGroupID(openStackCluster), openStackCluster.Status.Network.ID) + + return openStackServerSpec } func bastionName(clusterResourceName string) string { @@ -597,34 +584,6 @@ func getBastionSecurityGroupID(openStackCluster *infrav1.OpenStackCluster) *stri return nil } -func getOrCreateBastionPorts(openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service) error { - desiredPorts := openStackCluster.Status.Bastion.Resolved.Ports - resources := openStackCluster.Status.Bastion.Resources - if resources == nil { - return errors.New("bastion resources are nil") - } - - if len(desiredPorts) == len(resources.Ports) { - return nil - } - - err := networkingService.CreatePorts(openStackCluster, 
desiredPorts, resources) - if err != nil { - return fmt.Errorf("failed to create ports for bastion %s: %w", bastionName(openStackCluster.Name), err) - } - - return nil -} - -// bastionHashHasChanged returns a boolean whether if the latest bastion hash, built from the instance spec, has changed or not. -func bastionHashHasChanged(computeHash string, clusterAnnotations map[string]string) bool { - latestHash, ok := clusterAnnotations[BastionInstanceHashAnnotation] - if !ok { - return false - } - return latestHash != computeHash -} - func resolveLoadBalancerNetwork(openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service) error { lbSpec := openStackCluster.Spec.APIServerLoadBalancer if lbSpec.IsEnabled() { diff --git a/controllers/openstackcluster_controller_test.go b/controllers/openstackcluster_controller_test.go index 807f489e24..141b7977c6 100644 --- a/controllers/openstackcluster_controller_test.go +++ b/controllers/openstackcluster_controller_test.go @@ -22,12 +22,7 @@ import ( "reflect" "testing" - "github.com/google/go-cmp/cmp" - "github.com/gophercloud/gophercloud/v2" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" . "github.com/onsi/ginkgo/v2" //nolint:revive . 
"github.com/onsi/gomega" //nolint:revive @@ -207,9 +202,6 @@ var _ = Describe("OpenStackCluster controller", func() { testCluster.Status = infrav1.OpenStackClusterStatus{ Bastion: &infrav1.BastionStatus{ ID: "bastion-uuid", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - }, }, } err = k8sClient.Status().Update(ctx, testCluster) @@ -219,257 +211,10 @@ var _ = Describe("OpenStackCluster controller", func() { Expect(err).To(BeNil()) scope := scope.NewWithLogger(clientScope, log) - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.GetServer("bastion-uuid").Return(nil, gophercloud.ErrResourceNotFound{}) - - err = deleteBastion(scope, capiCluster, testCluster) + err = reconciler.deleteBastion(ctx, scope, capiCluster, testCluster) Expect(err).To(BeNil()) Expect(testCluster.Status.Bastion).To(BeNil()) }) - It("should adopt an existing bastion even if its uuid is not stored in status", func() { - testCluster.SetName("adopt-existing-bastion") - testCluster.Spec = infrav1.OpenStackClusterSpec{ - Bastion: &infrav1.Bastion{ - Enabled: ptr.To(true), - Spec: &bastionSpec, - }, - } - err := k8sClient.Create(ctx, testCluster) - Expect(err).To(BeNil()) - err = k8sClient.Create(ctx, capiCluster) - Expect(err).To(BeNil()) - testCluster.Status = infrav1.OpenStackClusterStatus{ - Bastion: &infrav1.BastionStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - }, - Network: &infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - Name: "network-name", - ID: "network-id", - }, - }, - } - err = k8sClient.Status().Update(ctx, testCluster) - Expect(err).To(BeNil()) - - log := GinkgoLogr - clientScope, err := mockScopeFactory.NewClientScopeFromObject(ctx, k8sClient, nil, log, testCluster) - 
Expect(err).To(BeNil()) - scope := scope.NewWithLogger(clientScope, log) - - server := servers.Server{ - ID: "adopted-bastion-uuid", - Status: "ACTIVE", - } - - networkClientRecorder := mockScopeFactory.NetworkClient.EXPECT() - networkClientRecorder.ListPort(gomock.Any()).Return([]ports.Port{{ID: "portID1"}}, nil) - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.ListServers(servers.ListOpts{ - Name: "^capi-cluster-bastion$", - }).Return([]servers.Server{server}, nil) - - networkClientRecorder.ListFloatingIP(floatingips.ListOpts{PortID: "portID1"}).Return(make([]floatingips.FloatingIP, 1), nil) - - res, err := reconcileBastion(scope, capiCluster, testCluster) - expectedStatus := &infrav1.BastionStatus{ - ID: "adopted-bastion-uuid", - State: "ACTIVE", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - } - Expect(testCluster.Status.Bastion).To(Equal(expectedStatus), cmp.Diff(testCluster.Status.Bastion, expectedStatus)) - Expect(err).To(BeNil()) - Expect(res).To(BeNil()) - }) - It("should adopt an existing bastion Floating IP if even if its uuid is not stored in status", func() { - testCluster.SetName("requeue-bastion") - testCluster.Spec = infrav1.OpenStackClusterSpec{ - Bastion: &infrav1.Bastion{ - Enabled: ptr.To(true), - Spec: &bastionSpec, - }, - } - err := k8sClient.Create(ctx, testCluster) - Expect(err).To(BeNil()) - err = k8sClient.Create(ctx, capiCluster) - Expect(err).To(BeNil()) - testCluster.Status = infrav1.OpenStackClusterStatus{ - Network: &infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - Name: "network-name", - ID: "network-id", - }, - }, - Bastion: &infrav1.BastionStatus{ - ID: "adopted-fip-bastion-uuid", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: 
[]infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - }, - } - err = k8sClient.Status().Update(ctx, testCluster) - Expect(err).To(BeNil()) - - log := GinkgoLogr - clientScope, err := mockScopeFactory.NewClientScopeFromObject(ctx, k8sClient, nil, log, testCluster) - Expect(err).To(BeNil()) - scope := scope.NewWithLogger(clientScope, log) - - server := servers.Server{ - ID: "adopted-fip-bastion-uuid", - Status: "ACTIVE", - } - - networkClientRecorder := mockScopeFactory.NetworkClient.EXPECT() - networkClientRecorder.ListPort(gomock.Any()).Return([]ports.Port{{ID: "portID1"}}, nil) - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.GetServer("adopted-fip-bastion-uuid").Return(&server, nil) - - networkClientRecorder.ListFloatingIP(floatingips.ListOpts{PortID: "portID1"}).Return([]floatingips.FloatingIP{{FloatingIP: "1.2.3.4"}}, nil) - - res, err := reconcileBastion(scope, capiCluster, testCluster) - Expect(testCluster.Status.Bastion).To(Equal(&infrav1.BastionStatus{ - ID: "adopted-fip-bastion-uuid", - FloatingIP: "1.2.3.4", - State: "ACTIVE", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - })) - Expect(err).To(BeNil()) - Expect(res).To(BeNil()) - }) - It("should requeue until bastion becomes active", func() { - testCluster.SetName("requeue-bastion") - testCluster.Spec = infrav1.OpenStackClusterSpec{ - Bastion: &infrav1.Bastion{ - Enabled: ptr.To(true), - Spec: &bastionSpec, - }, - } - err := k8sClient.Create(ctx, testCluster) - Expect(err).To(BeNil()) - err = k8sClient.Create(ctx, capiCluster) - Expect(err).To(BeNil()) - testCluster.Status = infrav1.OpenStackClusterStatus{ - Network: 
&infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - ID: "network-id", - Name: "network-name", - }, - }, - Bastion: &infrav1.BastionStatus{ - ID: "requeue-bastion-uuid", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - }, - } - err = k8sClient.Status().Update(ctx, testCluster) - Expect(err).To(BeNil()) - - log := GinkgoLogr - clientScope, err := mockScopeFactory.NewClientScopeFromObject(ctx, k8sClient, nil, log, testCluster) - Expect(err).To(BeNil()) - scope := scope.NewWithLogger(clientScope, log) - - server := servers.Server{ - ID: "requeue-bastion-uuid", - Status: "BUILD", - } - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.GetServer("requeue-bastion-uuid").Return(&server, nil) - - res, err := reconcileBastion(scope, capiCluster, testCluster) - Expect(testCluster.Status.Bastion).To(Equal(&infrav1.BastionStatus{ - ID: "requeue-bastion-uuid", - State: "BUILD", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - })) - Expect(err).To(BeNil()) - Expect(res).To(Equal(&reconcile.Result{RequeueAfter: waitForBuildingInstanceToReconcile})) - }) It("should delete an existing bastion even if its uuid is not stored in status", func() { testCluster.SetName("delete-existing-bastion") testCluster.Spec = infrav1.OpenStackClusterSpec{} @@ -478,11 +223,6 @@ var _ = Describe("OpenStackCluster controller", func() { err = k8sClient.Create(ctx, capiCluster) Expect(err).To(BeNil()) testCluster.Status = infrav1.OpenStackClusterStatus{ - Bastion: &infrav1.BastionStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - 
ImageID: "imageID", - }, - }, Network: &infrav1.NetworkStatusWithSubnets{ NetworkStatus: infrav1.NetworkStatus{ ID: "network-id", @@ -497,16 +237,7 @@ var _ = Describe("OpenStackCluster controller", func() { Expect(err).To(BeNil()) scope := scope.NewWithLogger(clientScope, log) - server := servers.Server{ID: "delete-bastion-uuid"} - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.ListServers(servers.ListOpts{ - Name: "^capi-cluster-bastion$", - }).Return([]servers.Server{server}, nil) - computeClientRecorder.DeleteServer("delete-bastion-uuid").Return(nil) - computeClientRecorder.GetServer("delete-bastion-uuid").Return(nil, gophercloud.ErrResourceNotFound{}) - - err = deleteBastion(scope, capiCluster, testCluster) + err = reconciler.deleteBastion(ctx, scope, capiCluster, testCluster) Expect(err).To(BeNil()) }) diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index b1c649f596..f1e9b362b1 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "encoding/base64" "errors" "fmt" "time" @@ -27,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" @@ -47,12 +45,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" 
"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/names" ) // OpenStackMachineReconciler reconciles a OpenStackMachine object. @@ -155,37 +153,13 @@ func (r *OpenStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req // Handle deleted machines if !openStackMachine.DeletionTimestamp.IsZero() { - return r.reconcileDelete(scope, clusterResourceName, infraCluster, machine, openStackMachine) + return r.reconcileDelete(ctx, scope, clusterResourceName, infraCluster, machine, openStackMachine) } // Handle non-deleted clusters return r.reconcileNormal(ctx, scope, clusterResourceName, infraCluster, machine, openStackMachine) } -func resolveMachineResources(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine) (bool, error) { - resolved := openStackMachine.Status.Resolved - if resolved == nil { - resolved = &infrav1.ResolvedMachineSpec{} - openStackMachine.Status.Resolved = resolved - } - // Resolve and store resources - return compute.ResolveMachineSpec(scope, - &openStackMachine.Spec, resolved, - clusterResourceName, openStackMachine.Name, - openStackCluster, getManagedSecurityGroup(openStackCluster, machine)) -} - -func adoptMachineResources(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine) error { - resources := openStackMachine.Status.Resources - if resources == nil { - resources = &infrav1.MachineResources{} - openStackMachine.Status.Resources = resources - } - - // Adopt any existing resources - return compute.AdoptMachineResources(scope, openStackMachine.Status.Resolved, resources) -} - func patchMachine(ctx context.Context, patchHelper *patch.Helper, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine, options ...patch.Option) error { // Always update the readyCondition by summarizing the state of other conditions. 
applicableConditions := []clusterv1.ConditionType{ @@ -235,10 +209,19 @@ func (r *OpenStackMachineReconciler) SetupWithManager(ctx context.Context, mgr c &ipamv1.IPAddressClaim{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &infrav1.OpenStackMachine{}), ). + // TODO(emilien) to optimize because it's not efficient to watch all OpenStackServer events. + // We are only interested in certain state transitions of the OpenStackServer: + // - when the server is deleted + // - when the server becomes ready + // For that we probably want to write Predicate functions for the OpenStackServer. + Watches( + &infrav1alpha1.OpenStackServer{}, + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &infrav1.OpenStackMachine{}), + ). Complete(r) } -func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam +func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam scope.Logger().Info("Reconciling Machine delete") computeService, err := compute.NewService(scope) @@ -246,11 +229,6 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl return ctrl.Result{}, err } - networkingService, err := networking.NewService(scope) - if err != nil { - return ctrl.Result{}, err - } - // Nothing to do if the cluster is not ready because no machine resources were created. 
if !openStackCluster.Status.Ready || openStackCluster.Status.Network == nil { // The finalizer should not have been added yet in this case, @@ -259,38 +237,17 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl return ctrl.Result{}, nil } - // For machines created after v0.10, or any machine which has been - // reconciled at least once by v0.10 or later, status.Resolved always - // exists before any resources are created. We can therefore assume - // that if it does not exist, no resources were created. - // - // There is an upgrade edge case where a machine may have been marked - // deleted before upgrade but we are completing it after upgrade. For - // this use case only we make a best effort to resolve resources before - // continuing, but if we get an error we log it and continue anyway. - // This has the potential to leak resources, but only in this specific - // edge case. The alternative is to continue retrying until it succeeds, - // but that risks never deleting a machine which cannot be resolved due - // to a spec error. - // - // This code can and should be deleted in a future release when we are - // sure that all machines have been reconciled at least by a v0.10 or - // later controller. - if _, err := resolveMachineResources(scope, clusterResourceName, openStackCluster, openStackMachine, machine); err != nil { - // Return the error, but allow the resource to be removed anyway. - controllerutil.RemoveFinalizer(openStackMachine, infrav1.MachineFinalizer) + machineServer, err := r.getMachineServer(ctx, openStackMachine) + if client.IgnoreNotFound(err) != nil { return ctrl.Result{}, err } - // Check for any orphaned resources - // N.B. Unlike resolveMachineResources, we must always look for orphaned resources in the delete path. 
- if err := adoptMachineResources(scope, openStackMachine); err != nil { - return ctrl.Result{}, fmt.Errorf("adopting machine resources: %w", err) - } - - instanceStatus, err := getInstanceStatus(openStackMachine, computeService) - if err != nil { - return ctrl.Result{}, err + var instanceStatus *compute.InstanceStatus + if machineServer != nil && machineServer.Status.InstanceID != nil { + instanceStatus, err = computeService.GetInstanceStatus(*machineServer.Status.InstanceID) + if err != nil { + return ctrl.Result{}, err + } } if util.IsControlPlaneMachine(machine) { @@ -299,35 +256,16 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl } } - // If no instance was created we currently need to check for orphaned - // volumes. - if instanceStatus == nil { - if err := computeService.DeleteVolumes(openStackMachine.Name, openStackMachine.Spec.RootVolume, openStackMachine.Spec.AdditionalBlockDevices); err != nil { - return ctrl.Result{}, fmt.Errorf("delete volumes: %w", err) - } - } else { - if err := computeService.DeleteInstance(openStackMachine, instanceStatus); err != nil { + if machineServer != nil { + scope.Logger().Info("Deleting server", "name", machineServer.Name) + if err := r.Client.Delete(ctx, machineServer); err != nil { conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceDeleteFailedReason, clusterv1.ConditionSeverityError, "Deleting instance failed: %v", err) - return ctrl.Result{}, fmt.Errorf("delete instance: %w", err) - } - } - - trunkSupported, err := networkingService.IsTrunkExtSupported() - if err != nil { - return ctrl.Result{}, err - } - - if openStackMachine.Status.Resources != nil { - portsStatus := openStackMachine.Status.Resources.Ports - for _, port := range portsStatus { - if err := networkingService.DeleteInstanceTrunkAndPort(openStackMachine, port, trunkSupported); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to delete port %q: %w", port.ID, err) - } + return 
ctrl.Result{}, err } - } - - if err := r.reconcileDeleteFloatingAddressFromPool(scope, openStackMachine); err != nil { - return ctrl.Result{}, err + // If the server was found, we need to wait for it to be deleted before + // removing the OpenStackMachine finalizer. + scope.Logger().Info("Waiting for server to be deleted before removing finalizer") + return ctrl.Result{}, nil } controllerutil.RemoveFinalizer(openStackMachine, infrav1.MachineFinalizer) @@ -335,13 +273,6 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl return ctrl.Result{}, nil } -func getInstanceStatus(openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service) (*compute.InstanceStatus, error) { - if openStackMachine.Status.InstanceID != nil { - return computeService.GetInstanceStatus(*openStackMachine.Status.InstanceID) - } - return computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) -} - func removeAPIServerEndpoint(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceStatus *compute.InstanceStatus, clusterResourceName string) error { if openStackCluster.Spec.APIServerLoadBalancer.IsEnabled() { loadBalancerService, err := loadbalancer.NewService(scope) @@ -400,136 +331,6 @@ func GetPortIDs(ports []infrav1.PortStatus) []string { return portIDs } -// reconcileFloatingAddressFromPool reconciles the floating IP address from the pool. -// It returns the IPAddressClaim and a boolean indicating if the IPAddressClaim is ready. 
-func (r *OpenStackMachineReconciler) reconcileFloatingAddressFromPool(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) (*ipamv1.IPAddressClaim, bool, error) { - if openStackMachine.Spec.FloatingIPPoolRef == nil { - return nil, false, nil - } - var claim *ipamv1.IPAddressClaim - claim, err := r.getOrCreateIPAddressClaimForFloatingAddress(ctx, scope, openStackMachine, openStackCluster) - if err != nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityInfo, "Failed to reconcile floating IP claims: %v", err) - return nil, true, err - } - if claim.Status.AddressRef.Name == "" { - r.Recorder.Eventf(openStackMachine, corev1.EventTypeNormal, "WaitingForIPAddressClaim", "Waiting for IPAddressClaim %s/%s to be allocated", claim.Namespace, claim.Name) - return claim, true, nil - } - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) - return claim, false, nil -} - -// createIPAddressClaim creates IPAddressClaim for the FloatingAddressFromPool if it does not exist yet. 
-func (r *OpenStackMachineReconciler) getOrCreateIPAddressClaimForFloatingAddress(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) (*ipamv1.IPAddressClaim, error) { - var err error - - poolRef := openStackMachine.Spec.FloatingIPPoolRef - claimName := names.GetFloatingAddressClaimName(openStackMachine.Name) - claim := &ipamv1.IPAddressClaim{} - - err = r.Client.Get(ctx, client.ObjectKey{Namespace: openStackMachine.Namespace, Name: claimName}, claim) - if err == nil { - return claim, nil - } else if client.IgnoreNotFound(err) != nil { - return nil, err - } - - claim = &ipamv1.IPAddressClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: claimName, - Namespace: openStackMachine.Namespace, - Labels: map[string]string{ - clusterv1.ClusterNameLabel: openStackCluster.Labels[clusterv1.ClusterNameLabel], - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: openStackMachine.APIVersion, - Kind: openStackMachine.Kind, - Name: openStackMachine.Name, - UID: openStackMachine.UID, - }, - }, - Finalizers: []string{infrav1.IPClaimMachineFinalizer}, - }, - Spec: ipamv1.IPAddressClaimSpec{ - PoolRef: *poolRef, - }, - } - - if err := r.Client.Create(ctx, claim); err != nil { - return nil, err - } - - r.Recorder.Eventf(openStackMachine, corev1.EventTypeNormal, "CreatingIPAddressClaim", "Creating IPAddressClaim %s/%s", claim.Namespace, claim.Name) - scope.Logger().Info("Created IPAddressClaim", "name", claim.Name) - return claim, nil -} - -func (r *OpenStackMachineReconciler) associateIPAddressFromIPAddressClaim(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, instanceStatus *compute.InstanceStatus, instanceNS *compute.InstanceNetworkStatus, claim *ipamv1.IPAddressClaim) error { - address := &ipamv1.IPAddress{} - addressKey := client.ObjectKey{Namespace: openStackMachine.Namespace, Name: claim.Status.AddressRef.Name} - - if err := r.Client.Get(ctx, 
addressKey, address); err != nil { - return err - } - - instanceAddresses := instanceNS.Addresses() - for _, instanceAddress := range instanceAddresses { - if instanceAddress.Address == address.Spec.Address { - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) - return nil - } - } - - networkingService, err := networking.NewService(scope) - if err != nil { - return err - } - - fip, err := networkingService.GetFloatingIP(address.Spec.Address) - if err != nil { - return err - } - - if fip == nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "floating IP does not exist") - return fmt.Errorf("floating IP %q does not exist", address.Spec.Address) - } - - port, err := networkingService.GetPortForExternalNetwork(instanceStatus.ID(), fip.FloatingNetworkID) - if err != nil { - return fmt.Errorf("get port for floating IP %q: %w", fip.FloatingIP, err) - } - - if port == nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Can't find port for floating IP %q on external network %s", fip.FloatingIP, fip.FloatingNetworkID) - return fmt.Errorf("port for floating IP %q on network %s does not exist", fip.FloatingIP, fip.FloatingNetworkID) - } - - if err = networkingService.AssociateFloatingIP(openStackMachine, fip, port.ID); err != nil { - return err - } - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) - return nil -} - -func (r *OpenStackMachineReconciler) reconcileDeleteFloatingAddressFromPool(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine) error { - log := scope.Logger().WithValues("openStackMachine", openStackMachine.Name) - log.Info("Reconciling Machine delete floating address from pool") - if openStackMachine.Spec.FloatingIPPoolRef == nil { - 
return nil - } - claimName := names.GetFloatingAddressClaimName(openStackMachine.Name) - claim := &ipamv1.IPAddressClaim{} - if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: openStackMachine.Namespace, Name: claimName}, claim); err != nil { - return client.IgnoreNotFound(err) - } - - controllerutil.RemoveFinalizer(claim, infrav1.IPClaimMachineFinalizer) - return r.Client.Update(context.Background(), claim) -} - func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (_ ctrl.Result, reterr error) { var err error @@ -552,69 +353,29 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, nil } - changed, err := resolveMachineResources(scope, clusterResourceName, openStackCluster, openStackMachine, machine) - if err != nil { - return ctrl.Result{}, err - } - - // Also add the finalizer when writing resolved resources so we can start creating resources on the next reconcile. if controllerutil.AddFinalizer(openStackMachine, infrav1.MachineFinalizer) { - changed = true - } - - // We requeue if we either added the finalizer or resolved machine - // resources. This means that we never create any resources unless we - // have observed that the finalizer and resolved machine resources were - // successfully written in a previous transaction. This in turn means - // that in the delete path we can be sure that if there are no resolved - // resources then no resources were created. 
- if changed { - scope.Logger().V(6).Info("Machine resources updated, requeuing") return ctrl.Result{}, nil } - // Check for orphaned resources previously created but not written to the status - if err := adoptMachineResources(scope, openStackMachine); err != nil { - return ctrl.Result{}, fmt.Errorf("adopting machine resources: %w", err) - } - scope.Logger().Info("Reconciling Machine") - userData, err := r.getBootstrapData(ctx, machine, openStackMachine) - if err != nil { - return ctrl.Result{}, err - } - computeService, err := compute.NewService(scope) - if err != nil { - return ctrl.Result{}, err - } - - floatingAddressClaim, waitingForFloatingAddress, err := r.reconcileFloatingAddressFromPool(ctx, scope, openStackMachine, openStackCluster) - if err != nil || waitingForFloatingAddress { + machineServer, waitingForServer, err := r.reconcileMachineServer(ctx, scope, openStackMachine, openStackCluster, machine) + if err != nil || waitingForServer { return ctrl.Result{}, err } - networkingService, err := networking.NewService(scope) - if err != nil { - return ctrl.Result{}, err - } - - err = getOrCreateMachinePorts(openStackMachine, networkingService) + computeService, err := compute.NewService(scope) if err != nil { return ctrl.Result{}, err } - portIDs := GetPortIDs(openStackMachine.Status.Resources.Ports) - instanceStatus, err := r.getOrCreateInstance(scope.Logger(), openStackCluster, machine, openStackMachine, computeService, userData, portIDs) - if err != nil || instanceStatus == nil { - // Conditions set in getOrCreateInstance + // instanceStatus is required for the API server load balancer and floating IP reconciliation + // when Octavia is enabled. 
+ var instanceStatus *compute.InstanceStatus + if instanceStatus, err = computeService.GetInstanceStatus(*machineServer.Status.InstanceID); err != nil { return ctrl.Result{}, err } - state := instanceStatus.State() - openStackMachine.Status.InstanceID = ptr.To(instanceStatus.ID()) - openStackMachine.Status.InstanceState = &state - instanceNS, err := instanceStatus.NetworkStatus() if err != nil { return ctrl.Result{}, fmt.Errorf("get network status: %w", err) @@ -631,63 +392,204 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope }) openStackMachine.Status.Addresses = addresses - if floatingAddressClaim != nil { - if err := r.associateIPAddressFromIPAddressClaim(ctx, scope, openStackMachine, instanceStatus, instanceNS, floatingAddressClaim); err != nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Failed while associating ip from pool: %v", err) - return ctrl.Result{}, err - } - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) + result := r.reconcileMachineState(scope, openStackMachine, machine, machineServer) + if result != nil { + return *result, nil + } + + if !util.IsControlPlaneMachine(machine) { + scope.Logger().Info("Not a Control plane machine, no floating ip reconcile needed, Reconciled Machine create successfully") + return ctrl.Result{}, nil + } + + err = r.reconcileAPIServerLoadBalancer(scope, openStackCluster, openStackMachine, instanceStatus, instanceNS, clusterResourceName) + if err != nil { + return ctrl.Result{}, err } - switch instanceStatus.State() { + conditions.MarkTrue(openStackMachine, infrav1.APIServerIngressReadyCondition) + scope.Logger().Info("Reconciled Machine create successfully") + return ctrl.Result{}, nil +} + +// reconcileMachineState updates the conditions of the OpenStackMachine instance based on the instance state +// and sets the ProviderID 
and Ready fields when the instance is active. +// It returns a reconcile request if the instance is not yet active. +func (r *OpenStackMachineReconciler) reconcileMachineState(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine, openStackServer *infrav1alpha1.OpenStackServer) *ctrl.Result { + switch *openStackServer.Status.InstanceState { case infrav1.InstanceStateActive: - scope.Logger().Info("Machine instance state is ACTIVE", "id", instanceStatus.ID()) + scope.Logger().Info("Machine instance state is ACTIVE", "id", openStackServer.Status.InstanceID) conditions.MarkTrue(openStackMachine, infrav1.InstanceReadyCondition) // Set properties required by CAPI machine controller - openStackMachine.Spec.ProviderID = ptr.To(fmt.Sprintf("openstack:///%s", instanceStatus.ID())) + openStackMachine.Spec.ProviderID = ptr.To(fmt.Sprintf("openstack:///%s", *openStackServer.Status.InstanceID)) openStackMachine.Status.Ready = true case infrav1.InstanceStateError: // If the machine has a NodeRef then it must have been working at some point, // so the error could be something temporary. // If not, it is more likely a configuration error so we set failure and never retry. 
- scope.Logger().Info("Machine instance state is ERROR", "id", instanceStatus.ID()) + scope.Logger().Info("Machine instance state is ERROR", "id", openStackServer.Status.InstanceID) if machine.Status.NodeRef == nil { - err = fmt.Errorf("instance state %q is unexpected", instanceStatus.State()) + err := fmt.Errorf("instance state %v is unexpected", openStackServer.Status.InstanceState) openStackMachine.SetFailure(capierrors.UpdateMachineError, err) } conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStateErrorReason, clusterv1.ConditionSeverityError, "") - return ctrl.Result{}, nil + return &ctrl.Result{} case infrav1.InstanceStateDeleted: // we should avoid further actions for DELETED VM scope.Logger().Info("Machine instance state is DELETED, no actions") conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceDeletedReason, clusterv1.ConditionSeverityError, "") - return ctrl.Result{}, nil + return &ctrl.Result{} case infrav1.InstanceStateBuild, infrav1.InstanceStateUndefined: - scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) - return ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil + scope.Logger().Info("Waiting for instance to become ACTIVE", "id", openStackServer.Status.InstanceID, "status", openStackServer.Status.InstanceState) + return &ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile} default: // The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE // due to potential conflict or unexpected actions - scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) - conditions.MarkUnknown(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %s", instanceStatus.State()) + scope.Logger().Info("Waiting for instance to become 
ACTIVE", "id", openStackServer.Status.InstanceID, "status", openStackServer.Status.InstanceState) + conditions.MarkUnknown(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %v", openStackServer.Status.InstanceState) - return ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile}, nil + return &ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile} } + return nil +} - if !util.IsControlPlaneMachine(machine) { - scope.Logger().Info("Not a Control plane machine, no floating ip reconcile needed, Reconciled Machine create successfully") - return ctrl.Result{}, nil +func (r *OpenStackMachineReconciler) getMachineServer(ctx context.Context, openStackMachine *infrav1.OpenStackMachine) (*infrav1alpha1.OpenStackServer, error) { + machineServer := &infrav1alpha1.OpenStackServer{} + machineServerName := client.ObjectKey{ + Namespace: openStackMachine.Namespace, + Name: openStackMachine.Name, + } + err := r.Client.Get(ctx, machineServerName, machineServer) + if err != nil { + return nil, err } + return machineServer, nil +} - err = r.reconcileAPIServerLoadBalancer(scope, openStackCluster, openStackMachine, instanceStatus, instanceNS, clusterResourceName) +// openStackMachineSpecToOpenStackServerSpec converts an OpenStackMachineSpec to an OpenStackServerSpec. +// It returns the OpenStackServerSpec object and an error if there is any. 
+func openStackMachineSpecToOpenStackServerSpec(openStackMachineSpec *infrav1.OpenStackMachineSpec, identityRef infrav1.OpenStackIdentityReference, tags []string, failureDomain string, userDataRef *corev1.LocalObjectReference, defaultSecGroup *string, defaultNetworkID string) *infrav1alpha1.OpenStackServerSpec { + openStackServerSpec := &infrav1alpha1.OpenStackServerSpec{ + AdditionalBlockDevices: openStackMachineSpec.AdditionalBlockDevices, + ConfigDrive: openStackMachineSpec.ConfigDrive, + Flavor: openStackMachineSpec.Flavor, + IdentityRef: identityRef, + Image: openStackMachineSpec.Image, + RootVolume: openStackMachineSpec.RootVolume, + ServerMetadata: openStackMachineSpec.ServerMetadata, + SSHKeyName: openStackMachineSpec.SSHKeyName, + } + + if len(tags) > 0 { + openStackServerSpec.Tags = tags + } + + if failureDomain != "" { + openStackServerSpec.AvailabilityZone = &failureDomain + } + + if userDataRef != nil { + openStackServerSpec.UserDataRef = userDataRef + } + + if openStackMachineSpec.Trunk { + openStackServerSpec.Trunk = ptr.To(true) + } + + if openStackMachineSpec.FloatingIPPoolRef != nil { + openStackServerSpec.FloatingIPPoolRef = openStackMachineSpec.FloatingIPPoolRef + } + + // If not ports are provided we create one. + // Ports must have a network so if none is provided we use the default network. + serverPorts := openStackMachineSpec.Ports + if len(openStackMachineSpec.Ports) == 0 { + serverPorts = make([]infrav1.PortOpts, 1) + } + for i := range serverPorts { + if serverPorts[i].Network == nil { + serverPorts[i].Network = &infrav1.NetworkParam{ + ID: &defaultNetworkID, + } + } + if len(serverPorts[i].SecurityGroups) == 0 && defaultSecGroup != nil { + serverPorts[i].SecurityGroups = []infrav1.SecurityGroupParam{ + { + ID: defaultSecGroup, + }, + } + } + } + openStackServerSpec.Ports = serverPorts + + return openStackServerSpec +} + +// reconcileMachineServer reconciles the OpenStackServer object for the OpenStackMachine. 
+// It returns the OpenStackServer object and a boolean indicating if the OpenStackServer is ready. +func (r *OpenStackMachineReconciler) reconcileMachineServer(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine) (*infrav1alpha1.OpenStackServer, bool, error) { + var server *infrav1alpha1.OpenStackServer + server, err := r.getOrCreateMachineServer(ctx, openStackCluster, openStackMachine, machine) if err != nil { - return ctrl.Result{}, err + // If an error occurs while getting or creating the OpenStackServer, + // we won't requeue the request so reconcileNormal can add conditions to the OpenStackMachine + // and we can see the error in the logs. + scope.Logger().Error(err, "Failed to get or create OpenStackServer") + return server, false, err + } + if !server.Status.Ready { + scope.Logger().Info("Waiting for OpenStackServer to be ready", "name", server.Name) + return server, true, nil } + return server, false, nil +} - conditions.MarkTrue(openStackMachine, infrav1.APIServerIngressReadyCondition) - scope.Logger().Info("Reconciled Machine create successfully") - return ctrl.Result{}, nil +// getOrCreateMachineServer gets or creates the OpenStackServer object for the OpenStackMachine. +// It returns the OpenStackServer object and an error if there is any. 
+func (r *OpenStackMachineReconciler) getOrCreateMachineServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine) (*infrav1alpha1.OpenStackServer, error) { + if machine.Spec.Bootstrap.DataSecretName == nil { + return nil, errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") + } + userDataRef := &corev1.LocalObjectReference{ + Name: *machine.Spec.Bootstrap.DataSecretName, + } + + var failureDomain string + if machine.Spec.FailureDomain != nil { + failureDomain = *machine.Spec.FailureDomain + } + machineServer, err := r.getMachineServer(ctx, openStackMachine) + + if client.IgnoreNotFound(err) != nil { + return nil, err + } + if apierrors.IsNotFound(err) { + machineServerSpec := openStackMachineSpecToOpenStackServerSpec(&openStackMachine.Spec, openStackCluster.Spec.IdentityRef, compute.InstanceTags(&openStackMachine.Spec, openStackCluster), failureDomain, userDataRef, getManagedSecurityGroup(openStackCluster, machine), openStackCluster.Status.Network.ID) + machineServer = &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + clusterv1.ClusterNameLabel: openStackCluster.Labels[clusterv1.ClusterNameLabel], + }, + Name: openStackMachine.Name, + Namespace: openStackMachine.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: openStackMachine.APIVersion, + Kind: openStackMachine.Kind, + Name: openStackMachine.Name, + UID: openStackMachine.UID, + }, + }, + }, + Spec: *machineServerSpec, + } + + if err := r.Client.Create(ctx, machineServer); err != nil { + return nil, fmt.Errorf("failed to create machine server: %w", err) + } + } + return machineServer, nil } func (r *OpenStackMachineReconciler) reconcileAPIServerLoadBalancer(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceStatus *compute.InstanceStatus, instanceNS 
*compute.InstanceNetworkStatus, clusterResourceName string) error { @@ -741,109 +643,6 @@ func (r *OpenStackMachineReconciler) reconcileAPIServerLoadBalancer(scope *scope return nil } -func getOrCreateMachinePorts(openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service) error { - resolved := openStackMachine.Status.Resolved - if resolved == nil { - return errors.New("machine resolved is nil") - } - resources := openStackMachine.Status.Resources - if resources == nil { - return errors.New("machine resources is nil") - } - desiredPorts := resolved.Ports - - if len(desiredPorts) == len(resources.Ports) { - return nil - } - - if err := networkingService.CreatePorts(openStackMachine, desiredPorts, resources); err != nil { - return fmt.Errorf("creating ports: %w", err) - } - - return nil -} - -func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string, portIDs []string) (*compute.InstanceStatus, error) { - var instanceStatus *compute.InstanceStatus - var err error - if openStackMachine.Status.InstanceID != nil { - instanceStatus, err = computeService.GetInstanceStatus(*openStackMachine.Status.InstanceID) - if err != nil { - logger.Info("Unable to get OpenStack instance", "name", openStackMachine.Name) - conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.OpenStackErrorReason, clusterv1.ConditionSeverityError, err.Error()) - return nil, err - } - } - if instanceStatus == nil { - // Check if there is an existing instance with machine name, in case where instance ID would not have been stored in machine status - instanceStatus, err = computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) - if err != nil { - logger.Info("Unable to get OpenStack instance by name", "name", openStackMachine.Name) - 
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) - return nil, err - } - if instanceStatus != nil { - return instanceStatus, nil - } - if openStackMachine.Status.InstanceID != nil { - logger.Info("Not reconciling machine in failed state. The previously existing OpenStack instance is no longer available") - conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists") - openStackMachine.SetFailure(capierrors.UpdateMachineError, errors.New("virtual machine no longer exists")) - return nil, nil - } - - instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData) - if err != nil { - return nil, err - } - - logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name) - instanceStatus, err = computeService.CreateInstance(openStackMachine, instanceSpec, portIDs) - if err != nil { - conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) - return nil, fmt.Errorf("create OpenStack instance: %w", err) - } - } - return instanceStatus, nil -} - -func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, userData string) (*compute.InstanceSpec, error) { - resolved := openStackMachine.Status.Resolved - if resolved == nil { - return nil, errors.New("machine resolved is nil") - } - - serverMetadata := make(map[string]string, len(openStackMachine.Spec.ServerMetadata)) - for i := range openStackMachine.Spec.ServerMetadata { - key := openStackMachine.Spec.ServerMetadata[i].Key - value := openStackMachine.Spec.ServerMetadata[i].Value - serverMetadata[key] = value - } - - instanceSpec := compute.InstanceSpec{ - Name: 
openStackMachine.Name, - ImageID: resolved.ImageID, - Flavor: openStackMachine.Spec.Flavor, - SSHKeyName: openStackMachine.Spec.SSHKeyName, - UserData: userData, - Metadata: serverMetadata, - ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, - RootVolume: openStackMachine.Spec.RootVolume, - AdditionalBlockDevices: openStackMachine.Spec.AdditionalBlockDevices, - ServerGroupID: resolved.ServerGroupID, - Trunk: openStackMachine.Spec.Trunk, - } - - // Add the failure domain only if specified - if machine.Spec.FailureDomain != nil { - instanceSpec.FailureDomain = *machine.Spec.FailureDomain - } - - instanceSpec.Tags = compute.InstanceTags(&openStackMachine.Spec, openStackCluster) - - return &instanceSpec, nil -} - // getManagedSecurityGroup returns the ID of the security group managed by the // OpenStackCluster whether it's a control plane or a worker machine. func getManagedSecurityGroup(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine) *string { @@ -851,6 +650,10 @@ func getManagedSecurityGroup(openStackCluster *infrav1.OpenStackCluster, machine return nil } + if machine == nil { + return nil + } + if util.IsControlPlaneMachine(machine) { if openStackCluster.Status.ControlPlaneSecurityGroup != nil { return &openStackCluster.Status.ControlPlaneSecurityGroup.ID @@ -906,25 +709,6 @@ func (r *OpenStackMachineReconciler) OpenStackClusterToOpenStackMachines(ctx con } } -func (r *OpenStackMachineReconciler) getBootstrapData(ctx context.Context, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (string, error) { - if machine.Spec.Bootstrap.DataSecretName == nil { - return "", errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") - } - - secret := &corev1.Secret{} - key := types.NamespacedName{Namespace: machine.Namespace, Name: *machine.Spec.Bootstrap.DataSecretName} - if err := r.Client.Get(ctx, key, secret); err != nil { - return "", 
fmt.Errorf("failed to retrieve bootstrap data secret for Openstack Machine %s/%s: %w", machine.Namespace, openStackMachine.Name, err) - } - - value, ok := secret.Data["value"] - if !ok { - return "", errors.New("error retrieving bootstrap data: secret value key is missing") - } - - return base64.StdEncoding.EncodeToString(value), nil -} - func (r *OpenStackMachineReconciler) requeueOpenStackMachinesForUnpausedCluster(ctx context.Context) handler.MapFunc { log := ctrl.LoggerFrom(ctx) return func(ctx context.Context, o client.Object) []ctrl.Request { diff --git a/controllers/openstackmachine_controller_test.go b/controllers/openstackmachine_controller_test.go index 5212ca4944..ed7505af76 100644 --- a/controllers/openstackmachine_controller_test.go +++ b/controllers/openstackmachine_controller_test.go @@ -17,29 +17,14 @@ limitations under the License. package controllers import ( - "fmt" "reflect" "testing" - "github.com/go-logr/logr/testr" - "github.com/google/go-cmp/cmp" - "github.com/gophercloud/gophercloud/v2" - "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/image/v2/images" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" - . 
"github.com/onsi/gomega" //nolint:revive - "go.uber.org/mock/gomock" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) const ( @@ -58,124 +43,71 @@ const ( failureDomain = "test-failure-domain" ) -func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { - return &infrav1.OpenStackCluster{ - Spec: infrav1.OpenStackClusterSpec{}, +func TestOpenStackMachineSpecToOpenStackServerSpec(t *testing.T) { + identityRef := infrav1.OpenStackIdentityReference{ + Name: "foo", + CloudName: "my-cloud", + } + openStackCluster := &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSecurityGroups: &infrav1.ManagedSecurityGroups{}, + }, Status: infrav1.OpenStackClusterStatus{ + WorkerSecurityGroup: &infrav1.SecurityGroupStatus{ + ID: workerSecurityGroupUUID, + }, Network: &infrav1.NetworkStatusWithSubnets{ NetworkStatus: infrav1.NetworkStatus{ ID: networkUUID, }, - Subnets: []infrav1.Subnet{ - {ID: subnetUUID}, - }, }, - ControlPlaneSecurityGroup: &infrav1.SecurityGroupStatus{ID: controlPlaneSecurityGroupUUID}, - WorkerSecurityGroup: &infrav1.SecurityGroupStatus{ID: workerSecurityGroupUUID}, }, } -} - -func getDefaultMachine() *clusterv1.Machine { - return &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - FailureDomain: ptr.To(failureDomain), - }, - } -} - -func getDefaultOpenStackMachine() *infrav1.OpenStackMachine { - return &infrav1.OpenStackMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: openStackMachineName, - Namespace: namespace, - }, - Spec: infrav1.OpenStackMachineSpec{ - // ProviderID is set by the controller - // 
InstanceID is set by the controller - // FloatingIP is only used by the cluster controller for the Bastion - // TODO: Test Networks, Ports, Subnet, and Trunk separately - Flavor: flavorName, - Image: infrav1.ImageParam{ID: ptr.To(imageUUID)}, - SSHKeyName: sshKeyName, - Tags: []string{"test-tag"}, - ServerMetadata: []infrav1.ServerMetadata{ - {Key: "test-metadata", Value: "test-value"}, + portOpts := []infrav1.PortOpts{ + { + Network: &infrav1.NetworkParam{ + ID: ptr.To(openStackCluster.Status.Network.ID), }, - ConfigDrive: ptr.To(true), - SecurityGroups: []infrav1.SecurityGroupParam{}, - ServerGroup: &infrav1.ServerGroupParam{ID: ptr.To(serverGroupUUID)}, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - ServerGroupID: serverGroupUUID, + SecurityGroups: []infrav1.SecurityGroupParam{ + { + ID: ptr.To(openStackCluster.Status.WorkerSecurityGroup.ID), + }, }, }, } -} - -func getDefaultInstanceSpec() *compute.InstanceSpec { - return &compute.InstanceSpec{ - Name: openStackMachineName, - ImageID: imageUUID, - Flavor: flavorName, - SSHKeyName: sshKeyName, - UserData: "user-data", - Metadata: map[string]string{ - "test-metadata": "test-value", - }, - ConfigDrive: *ptr.To(true), - FailureDomain: *ptr.To(failureDomain), - ServerGroupID: serverGroupUUID, - Tags: []string{"test-tag"}, - } -} - -func Test_machineToInstanceSpec(t *testing.T) { - RegisterTestingT(t) - + image := infrav1.ImageParam{Filter: &infrav1.ImageFilter{Name: ptr.To("my-image")}} + tags := []string{"tag1", "tag2"} + userData := &corev1.LocalObjectReference{Name: "server-data-secret"} tests := []struct { - name string - openStackCluster func() *infrav1.OpenStackCluster - machine func() *clusterv1.Machine - openStackMachine func() *infrav1.OpenStackMachine - wantInstanceSpec func() *compute.InstanceSpec + name string + spec *infrav1.OpenStackMachineSpec + want *infrav1alpha1.OpenStackServerSpec }{ { - name: "Defaults", - openStackCluster: 
getDefaultOpenStackCluster, - machine: getDefaultMachine, - openStackMachine: getDefaultOpenStackMachine, - wantInstanceSpec: getDefaultInstanceSpec, - }, - { - name: "Tags", - openStackCluster: func() *infrav1.OpenStackCluster { - c := getDefaultOpenStackCluster() - c.Spec.Tags = []string{"cluster-tag", "duplicate-tag"} - return c + name: "Test a minimum OpenStackMachineSpec to OpenStackServerSpec conversion", + spec: &infrav1.OpenStackMachineSpec{ + Flavor: flavorName, + Image: image, + SSHKeyName: sshKeyName, }, - machine: getDefaultMachine, - openStackMachine: func() *infrav1.OpenStackMachine { - m := getDefaultOpenStackMachine() - m.Spec.Tags = []string{"machine-tag", "duplicate-tag"} - return m - }, - wantInstanceSpec: func() *compute.InstanceSpec { - i := getDefaultInstanceSpec() - i.Tags = []string{"machine-tag", "duplicate-tag", "cluster-tag"} - return i + want: &infrav1alpha1.OpenStackServerSpec{ + Flavor: flavorName, + IdentityRef: identityRef, + Image: image, + SSHKeyName: sshKeyName, + Ports: portOpts, + Tags: tags, + UserDataRef: userData, }, }, } - for _, tt := range tests { + for i := range tests { + tt := tests[i] t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - got, _ := machineToInstanceSpec(tt.openStackCluster(), tt.machine(), tt.openStackMachine(), "user-data") - wanted := tt.wantInstanceSpec() - - g.Expect(got).To(Equal(wanted), cmp.Diff(got, wanted)) + spec := openStackMachineSpecToOpenStackServerSpec(tt.spec, identityRef, tags, "", userData, &openStackCluster.Status.WorkerSecurityGroup.ID, openStackCluster.Status.Network.ID) + if !reflect.DeepEqual(spec, tt.want) { + t.Errorf("openStackMachineSpecToOpenStackServerSpec() got = %+v, want %+v", spec, tt.want) + } }) } } @@ -218,354 +150,3 @@ func TestGetPortIDs(t *testing.T) { }) } } - -func Test_reconcileDelete(t *testing.T) { - const ( - instanceUUID = "8308882f-5e46-47e6-8e12-1fe869c43d1d" - portUUID = "55eac199-4836-4a98-b31c-9f65f382ad46" - rootVolumeUUID = 
"4724a66d-bd5e-47f3-bb57-a67fcb4168e0" - trunkUUID = "9d348baa-93b1-4e63-932f-dd0527fbd789" - - imageName = "my-image" - ) - - // ******************* - // START OF TEST CASES - // ******************* - - type recorders struct { - compute *mock.MockComputeClientMockRecorder - image *mock.MockImageClientMockRecorder - network *mock.MockNetworkClientMockRecorder - volume *mock.MockVolumeClientMockRecorder - } - - defaultImage := infrav1.ImageParam{ - Filter: &infrav1.ImageFilter{ - Name: ptr.To(imageName), - }, - } - - defaultResolvedPorts := []infrav1.ResolvedPortSpec{ - { - Name: openStackMachineName + "-0", - Description: "my test port", - NetworkID: networkUUID, - }, - } - defaultPortsStatus := []infrav1.PortStatus{ - { - ID: portUUID, - }, - } - - deleteDefaultPorts := func(r *recorders) { - trunkExtension := extensions.Extension{} - trunkExtension.Alias = "trunk" - r.network.ListExtensions().Return([]extensions.Extension{trunkExtension}, nil) - r.network.ListTrunk(trunks.ListOpts{PortID: portUUID}).Return([]trunks.Trunk{{ID: trunkUUID}}, nil) - r.network.ListTrunkSubports(trunkUUID).Return([]trunks.Subport{}, nil) - r.network.DeleteTrunk(trunkUUID).Return(nil) - r.network.DeletePort(portUUID).Return(nil) - } - - deleteServerByID := func(r *recorders) { - r.compute.GetServer(instanceUUID).Return(&servers.Server{ - ID: instanceUUID, - Name: openStackMachineName, - }, nil) - r.compute.DeleteServer(instanceUUID).Return(nil) - r.compute.GetServer(instanceUUID).Return(nil, gophercloud.ErrUnexpectedResponseCode{Actual: 404}) - } - deleteServerByName := func(r *recorders) { - r.compute.ListServers(servers.ListOpts{ - Name: "^" + openStackMachineName + "$", - }).Return([]servers.Server{ - { - ID: instanceUUID, - Name: openStackMachineName, - }, - }, nil) - r.compute.DeleteServer(instanceUUID).Return(nil) - r.compute.GetServer(instanceUUID).Return(nil, gophercloud.ErrUnexpectedResponseCode{Actual: 404}) - } - - deleteMissingServerByName := func(r *recorders) { - // Lookup 
server by name because it is not in status. - // Don't find it. - r.compute.ListServers(servers.ListOpts{ - Name: "^" + openStackMachineName + "$", - }).Return([]servers.Server{}, nil) - } - - deleteRootVolume := func(r *recorders) { - // Fetch volume by name - volumeName := fmt.Sprintf("%s-root", openStackMachineName) - r.volume.ListVolumes(volumes.ListOpts{ - AllTenants: false, - Name: volumeName, - TenantID: "", - }).Return([]volumes.Volume{{ - ID: rootVolumeUUID, - Name: volumeName, - }}, nil) - - // Delete volume - r.volume.DeleteVolume(rootVolumeUUID, volumes.DeleteOpts{}).Return(nil) - } - - adoptExistingPorts := func(r *recorders) { - r.network.ListPort(ports.ListOpts{ - NetworkID: networkUUID, - Name: openStackMachineName + "-0", - }).Return([]ports.Port{{ID: portUUID}}, nil) - } - - resolveImage := func(r *recorders) { - r.image.ListImages(images.ListOpts{ - Name: imageName, - }).Return([]images.Image{{ID: imageUUID}}, nil) - } - - tests := []struct { - name string - osMachine infrav1.OpenStackMachine - expect func(r *recorders) - wantErr bool - wantRemoveFinalizer bool - clusterNotReady bool - }{ - { - name: "No volumes, resolved and resources populated", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - InstanceID: ptr.To(instanceUUID), - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - deleteServerByID(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - name: "Root volume, resolved and resources populated", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - RootVolume: &infrav1.RootVolume{ - SizeGiB: 50, - }, - }, - Status: infrav1.OpenStackMachineStatus{ - InstanceID: ptr.To(instanceUUID), - Resolved: 
&infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - // Server exists, so we don't delete root volume explicitly - deleteServerByID(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - name: "Root volume, machine not created, resolved and resources populated", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - RootVolume: &infrav1.RootVolume{ - SizeGiB: 50, - }, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - deleteMissingServerByName(r) - deleteRootVolume(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - // N.B. The 'no resolved but resource exist' case can - // only happen across an upgrade. At some point in the - // future we should stop handling it. - name: "No volumes, no resolved or resources, instance exists", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - // Unlike resolved and resources, - // instanceID will have been converted - // from the previous API version. - InstanceID: ptr.To(instanceUUID), - }, - }, - expect: func(r *recorders) { - resolveImage(r) - adoptExistingPorts(r) - deleteServerByID(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - // This is an upgrade case because from v0.10 onwards - // we don't add the finalizer until we add resolved, so - // this can no longer occur. This will stop working when - // we remove handling for empty resolved on delete. 
- name: "Invalid image, no resolved or resources", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - }, - expect: func(r *recorders) { - r.image.ListImages(images.ListOpts{Name: imageName}).Return([]images.Image{}, nil) - }, - wantErr: true, - wantRemoveFinalizer: true, - }, - { - name: "No instance id, server and ports exist", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - deleteServerByName(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - name: "Adopt ports error should fail deletion and retry", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - }, - }, - expect: func(r *recorders) { - r.network.ListPort(ports.ListOpts{ - NetworkID: networkUUID, - Name: openStackMachineName + "-0", - }).Return(nil, fmt.Errorf("error adopting ports")) - }, - wantErr: true, - wantRemoveFinalizer: false, - }, - { - // This is an upgrade case because from v0.10 onwards we - // should not have added the finalizer until the cluster - // is ready. 
- name: "Cluster not ready should remove finalizer", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - }, - clusterNotReady: true, - wantRemoveFinalizer: true, - }, - } - for i := range tests { - tt := &tests[i] - t.Run(tt.name, func(t *testing.T) { - g := NewGomegaWithT(t) - log := testr.New(t) - - mockCtrl := gomock.NewController(t) - mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "") - - reconciler := OpenStackMachineReconciler{} - - computeRecorder := mockScopeFactory.ComputeClient.EXPECT() - imageRecorder := mockScopeFactory.ImageClient.EXPECT() - networkRecorder := mockScopeFactory.NetworkClient.EXPECT() - volumeRecorder := mockScopeFactory.VolumeClient.EXPECT() - - if tt.expect != nil { - tt.expect(&recorders{computeRecorder, imageRecorder, networkRecorder, volumeRecorder}) - } - scopeWithLogger := scope.NewWithLogger(mockScopeFactory, log) - - openStackCluster := infrav1.OpenStackCluster{} - openStackCluster.Status.Ready = !tt.clusterNotReady - openStackCluster.Status.Network = &infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - Name: "my-network", - ID: networkUUID, - }, - Subnets: []infrav1.Subnet{ - { - Name: "my-subnet", - ID: subnetUUID, - CIDR: "192.168.0.0/24", - }, - }, - } - - machine := clusterv1.Machine{} - - osMachine := &tt.osMachine - osMachine.Name = openStackMachineName - osMachine.Finalizers = []string{infrav1.MachineFinalizer} - - _, err := reconciler.reconcileDelete(scopeWithLogger, openStackMachineName, &openStackCluster, &machine, &tt.osMachine) - - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - if tt.wantRemoveFinalizer { - g.Expect(osMachine.Finalizers).To(BeEmpty()) - } else { - g.Expect(osMachine.Finalizers).To(ConsistOf(infrav1.MachineFinalizer)) - } - }) - } -} diff --git a/controllers/openstackserver_controller.go b/controllers/openstackserver_controller.go new file mode 100644 
index 0000000000..02766612b1 --- /dev/null +++ b/controllers/openstackserver_controller.go @@ -0,0 +1,617 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/names" +) + +const 
( + SpecHashAnnotation = "infrastructure.cluster.x-k8s.io/spec-hash" +) + +// OpenStackServerReconciler reconciles a OpenStackServer object. +type OpenStackServerReconciler struct { + Client client.Client + Recorder record.EventRecorder + WatchFilterValue string + ScopeFactory scope.Factory + CaCertificates []byte // PEM encoded ca certificates. + + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackservers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackservers/status,verbs=get;update;patch + +func (r *OpenStackServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + + // Fetch the OpenStackServer instance. + openStackServer := &infrav1alpha1.OpenStackServer{} + err := r.Client.Get(ctx, req.NamespacedName, openStackServer) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + clientScope, err := r.ScopeFactory.NewClientScopeFromObject(ctx, r.Client, r.CaCertificates, log, openStackServer) + if err != nil { + return reconcile.Result{}, err + } + scope := scope.NewWithLogger(clientScope, log) + + scope.Logger().Info("Reconciling OpenStackServer") + + cluster, err := getClusterFromMetadata(ctx, r.Client, openStackServer.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster != nil { + if annotations.IsPaused(cluster, openStackServer) { + scope.Logger().Info("OpenStackServer linked to a Cluster that is paused, won't reconcile", "namespace", openStackServer.Namespace, "name", openStackServer.Name)
+ return reconcile.Result{}, nil + } + } + + patchHelper, err := patch.NewHelper(openStackServer, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + defer func() { + if err := patchServer(ctx, patchHelper, openStackServer); err != nil { + result = ctrl.Result{} + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + if !openStackServer.ObjectMeta.DeletionTimestamp.IsZero() { + return reconcile.Result{}, r.reconcileDelete(scope, openStackServer) + } + + return r.reconcileNormal(ctx, scope, openStackServer) +} + +func patchServer(ctx context.Context, patchHelper *patch.Helper, openStackServer *infrav1alpha1.OpenStackServer, options ...patch.Option) error { + // Always update the readyCondition by summarizing the state of other conditions. + applicableConditions := []clusterv1.ConditionType{ + infrav1.InstanceReadyCondition, + } + + conditions.SetSummary(openStackServer, conditions.WithConditions(applicableConditions...)) + + // Patch the object, ignoring conflicts on the conditions owned by this controller. + // Also, if requested, we are adding additional options like e.g. Patch ObservedGeneration when issuing the + // patch at the end of the reconcile loop. + options = append(options, + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + clusterv1.ReadyCondition, + infrav1.InstanceReadyCondition, + }}, + ) + conditions.SetSummary(openStackServer, + conditions.WithConditions( + infrav1.InstanceReadyCondition, + ), + ) + + return patchHelper.Patch(ctx, openStackServer, options...) +} + +func (r *OpenStackServerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1alpha1.OpenStackServer{}). 
+ Complete(r) +} + +func (r *OpenStackServerReconciler) reconcileDelete(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) error { + scope.Logger().Info("Reconciling Server delete") + + computeService, err := compute.NewService(scope) + if err != nil { + return err + } + + networkingService, err := networking.NewService(scope) + if err != nil { + return err + } + + // Check for any orphaned resources + // N.B. Unlike resolveServerResources, we must always look for orphaned resources in the delete path. + if err := adoptServerResources(scope, openStackServer); err != nil { + return fmt.Errorf("adopting server resources: %w", err) + } + + instanceStatus, err := getServerStatus(openStackServer, computeService) + if err != nil { + return err + } + + // If no instance was created we currently need to check for orphaned volumes. + if instanceStatus == nil { + if err := computeService.DeleteVolumes(openStackServer.Name, openStackServer.Spec.RootVolume, openStackServer.Spec.AdditionalBlockDevices); err != nil { + return fmt.Errorf("delete volumes: %w", err) + } + } else { + if err := computeService.DeleteInstance(openStackServer, instanceStatus); err != nil { + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceDeleteFailedReason, clusterv1.ConditionSeverityError, "Deleting instance failed: %v", err) + return fmt.Errorf("delete instance: %w", err) + } + } + + trunkSupported, err := networkingService.IsTrunkExtSupported() + if err != nil { + return err + } + + if openStackServer.Status.Resources != nil { + portsStatus := openStackServer.Status.Resources.Ports + for _, port := range portsStatus { + if err := networkingService.DeleteInstanceTrunkAndPort(openStackServer, port, trunkSupported); err != nil { + return fmt.Errorf("failed to delete port %q: %w", port.ID, err) + } + } + } + + if err := r.reconcileDeleteFloatingAddressFromPool(scope, openStackServer); err != nil { + return err + } + + 
controllerutil.RemoveFinalizer(openStackServer, infrav1alpha1.OpenStackServerFinalizer) + scope.Logger().Info("Reconciled Server deleted successfully") + return nil +} + +func (r *OpenStackServerReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (_ ctrl.Result, reterr error) { + // If the OpenStackServer is in an error state, return early. + if openStackServer.Status.InstanceState != nil && *openStackServer.Status.InstanceState == infrav1.InstanceStateError { + scope.Logger().Info("Not reconciling server in error state. See openStackServer.status or previously logged error for details") + return ctrl.Result{}, nil + } + + scope.Logger().Info("Reconciling Server create") + + changed, err := resolveServerResources(scope, openStackServer) + if err != nil { + return ctrl.Result{}, err + } + + // Also add the finalizer when writing resolved resources so we can start creating resources on the next reconcile. + if controllerutil.AddFinalizer(openStackServer, infrav1alpha1.OpenStackServerFinalizer) { + changed = true + } + + // We requeue if we either added the finalizer or resolved server + // resources. This means that we never create any resources unless we + // have observed that the finalizer and resolved server resources were + // successfully written in a previous transaction. This in turn means + // that in the delete path we can be sure that if there are no resolved + // resources then no resources were created. 
+ if changed { + scope.Logger().V(6).Info("Server resources updated, requeuing") + return ctrl.Result{}, nil + } + + // Check for orphaned resources previously created but not written to the status + if err := adoptServerResources(scope, openStackServer); err != nil { + return ctrl.Result{}, fmt.Errorf("adopting server resources: %w", err) + } + computeService, err := compute.NewService(scope) + if err != nil { + return ctrl.Result{}, err + } + networkingService, err := networking.NewService(scope) + if err != nil { + return ctrl.Result{}, err + } + + floatingAddressClaim, waitingForFloatingAddress, err := r.reconcileFloatingAddressFromPool(ctx, scope, openStackServer) + if err != nil || waitingForFloatingAddress { + return ctrl.Result{}, err + } + + err = getOrCreateServerPorts(openStackServer, networkingService) + if err != nil { + return ctrl.Result{}, err + } + portIDs := GetPortIDs(openStackServer.Status.Resources.Ports) + + instanceStatus, err := r.getOrCreateServer(ctx, scope.Logger(), openStackServer, computeService, portIDs) + if err != nil || instanceStatus == nil { + // Conditions set in getOrCreateInstance + return ctrl.Result{}, err + } + + instanceNS, err := instanceStatus.NetworkStatus() + if err != nil { + return ctrl.Result{}, fmt.Errorf("get network status: %w", err) + } + + if floatingAddressClaim != nil { + if err := r.associateIPAddressFromIPAddressClaim(ctx, openStackServer, instanceStatus, instanceNS, floatingAddressClaim, networkingService); err != nil { + conditions.MarkFalse(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Failed while associating ip from pool: %v", err) + return ctrl.Result{}, err + } + conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition) + } + + state := instanceStatus.State() + openStackServer.Status.InstanceID = ptr.To(instanceStatus.ID()) + openStackServer.Status.InstanceState = &state + + 
switch instanceStatus.State() { + case infrav1.InstanceStateActive: + scope.Logger().Info("Server instance state is ACTIVE", "id", instanceStatus.ID()) + conditions.MarkTrue(openStackServer, infrav1.InstanceReadyCondition) + openStackServer.Status.Ready = true + case infrav1.InstanceStateError: + scope.Logger().Info("Server instance state is ERROR", "id", instanceStatus.ID()) + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceStateErrorReason, clusterv1.ConditionSeverityError, "") + return ctrl.Result{}, nil + case infrav1.InstanceStateDeleted: + // we should avoid further actions for DELETED VM + scope.Logger().Info("Server instance state is DELETED, no actions") + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceDeletedReason, clusterv1.ConditionSeverityError, "") + return ctrl.Result{}, nil + case infrav1.InstanceStateBuild, infrav1.InstanceStateUndefined: + scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) + return ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil + default: + // The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE + // due to potential conflict or unexpected actions + scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) + conditions.MarkUnknown(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %s", instanceStatus.State()) + + return ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile}, nil + } + + scope.Logger().Info("Reconciled Server create successfully") + return ctrl.Result{}, nil +} + +// resolveServerResources resolves and stores the OpenStack resources for the server. 
+func resolveServerResources(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (bool, error) { + if openStackServer.Status.Resources == nil { + openStackServer.Status.Resources = &infrav1alpha1.ServerResources{} + } + resolved := openStackServer.Status.Resolved + if resolved == nil { + resolved = &infrav1alpha1.ResolvedServerSpec{} + openStackServer.Status.Resolved = resolved + } + return compute.ResolveServerSpec(scope, openStackServer) +} + +// adoptServerResources adopts the OpenStack resources for the server. +func adoptServerResources(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) error { + resources := openStackServer.Status.Resources + if resources == nil { + resources = &infrav1alpha1.ServerResources{} + openStackServer.Status.Resources = resources + } + + // Adopt any existing resources + return compute.AdoptServerResources(scope, openStackServer.Status.Resolved, resources) +} + +func getOrCreateServerPorts(openStackServer *infrav1alpha1.OpenStackServer, networkingService *networking.Service) error { + resolved := openStackServer.Status.Resolved + if resolved == nil { + return errors.New("server status resolved is nil") + } + resources := openStackServer.Status.Resources + if resources == nil { + return errors.New("server status resources is nil") + } + desiredPorts := resolved.Ports + + if len(desiredPorts) == len(resources.Ports) { + return nil + } + + if err := networkingService.CreatePorts(openStackServer, desiredPorts, resources); err != nil { + return fmt.Errorf("creating ports: %w", err) + } + + return nil +} + +// getOrCreateServer gets or creates a server instance and returns the instance status, or an error. 
+func (r *OpenStackServerReconciler) getOrCreateServer(ctx context.Context, logger logr.Logger, openStackServer *infrav1alpha1.OpenStackServer, computeService *compute.Service, portIDs []string) (*compute.InstanceStatus, error) {
+	var instanceStatus *compute.InstanceStatus
+	var err error
+
+	if openStackServer.Status.InstanceID != nil {
+		instanceStatus, err = computeService.GetInstanceStatus(*openStackServer.Status.InstanceID)
+		if err != nil {
+			logger.Info("Unable to get OpenStack instance", "name", openStackServer.Name)
+			conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.OpenStackErrorReason, clusterv1.ConditionSeverityError, err.Error())
+			return nil, err
+		}
+	}
+	if instanceStatus == nil {
+		// Check if there is an existing instance with machine name, in case where instance ID would not have been stored in machine status.
+		// Use a distinct name instead of shadowing instanceStatus so the scoping is explicit.
+		byName, err := computeService.GetInstanceStatusByName(openStackServer, openStackServer.Name)
+		if err != nil {
+			logger.Error(err, "Failed to get instance by name", "name", openStackServer.Name)
+			return nil, err
+		}
+		if byName != nil {
+			logger.Info("Server already exists", "name", openStackServer.Name, "id", byName.ID())
+			return byName, nil
+		}
+		if openStackServer.Status.InstanceID != nil {
+			// An instance ID was recorded but the instance is gone: do not
+			// recreate it; leave the server in a failed state instead.
+			logger.Info("Not reconciling server in failed state. The previously existing OpenStack instance is no longer available")
+			conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists")
+			return nil, nil
+		}
+
+		logger.Info("Server does not exist, creating Server", "name", openStackServer.Name)
+		instanceSpec, err := r.serverToInstanceSpec(ctx, openStackServer)
+		if err != nil {
+			return nil, err
+		}
+		instanceSpec.Name = openStackServer.Name
+		instanceStatus, err = computeService.CreateInstance(openStackServer, instanceSpec, portIDs)
+		if err != nil {
+			conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
+			openStackServer.Status.InstanceState = &infrav1.InstanceStateError
+			return nil, fmt.Errorf("create OpenStack instance: %w", err)
+		}
+		return instanceStatus, nil
+	}
+	return instanceStatus, nil
+}
+
+// getUserDataSecretValue returns the base64-encoded content of the "value"
+// key of the named secret in the given namespace.
+func (r *OpenStackServerReconciler) getUserDataSecretValue(ctx context.Context, namespace, secretName string) (string, error) {
+	secret := &corev1.Secret{}
+	key := types.NamespacedName{Namespace: namespace, Name: secretName}
+	if err := r.Client.Get(ctx, key, secret); err != nil {
+		return "", fmt.Errorf("failed to get secret %s/%s: %w", namespace, secretName, err)
+	}
+
+	value, ok := secret.Data["value"]
+	if !ok {
+		return "", fmt.Errorf("secret %s/%s does not contain userData", namespace, secretName)
+	}
+
+	return base64.StdEncoding.EncodeToString(value), nil
+}
+
+// serverToInstanceSpec builds the compute.InstanceSpec for the server from
+// its spec and resolved status. Status.Resolved must be populated first.
+func (r *OpenStackServerReconciler) serverToInstanceSpec(ctx context.Context, openStackServer *infrav1alpha1.OpenStackServer) (*compute.InstanceSpec, error) {
+	resolved := openStackServer.Status.Resolved
+	if resolved == nil {
+		return nil, errors.New("server resolved is nil")
+	}
+
+	serverMetadata := make(map[string]string, len(openStackServer.Spec.ServerMetadata))
+	for i := range openStackServer.Spec.ServerMetadata {
+		key := openStackServer.Spec.ServerMetadata[i].Key
+		value := openStackServer.Spec.ServerMetadata[i].Value
+		serverMetadata[key] = value
+	}
+
+	instanceSpec := &compute.InstanceSpec{
+		AdditionalBlockDevices: openStackServer.Spec.AdditionalBlockDevices,
+		ConfigDrive:            openStackServer.Spec.ConfigDrive != nil && *openStackServer.Spec.ConfigDrive,
+		Flavor:                 openStackServer.Spec.Flavor,
+		ImageID:                resolved.ImageID,
+		Metadata:               serverMetadata,
+		Name:                   openStackServer.Name,
+		RootVolume:             openStackServer.Spec.RootVolume,
+		SSHKeyName:             openStackServer.Spec.SSHKeyName,
+		ServerGroupID:          resolved.ServerGroupID,
+		Tags:                   openStackServer.Spec.Tags,
+		Trunk:                  openStackServer.Spec.Trunk != nil && *openStackServer.Spec.Trunk,
+	}
+
+	if openStackServer.Spec.UserDataRef != nil {
+		userData, err := r.getUserDataSecretValue(ctx, openStackServer.Namespace, openStackServer.Spec.UserDataRef.Name)
+		if err != nil {
+			// Wrap the underlying error (%w) instead of discarding it, so the
+			// caller can see why the secret lookup failed.
+			return nil, fmt.Errorf("failed to get user data secret value: %w", err)
+		}
+		instanceSpec.UserData = userData
+	}
+
+	if openStackServer.Spec.AvailabilityZone != nil {
+		instanceSpec.FailureDomain = *openStackServer.Spec.AvailabilityZone
+	}
+
+	return instanceSpec, nil
+}
+
+// getServerStatus looks the instance up by stored ID when available,
+// otherwise by the server's name.
+func getServerStatus(openStackServer *infrav1alpha1.OpenStackServer, computeService *compute.Service) (*compute.InstanceStatus, error) {
+	if openStackServer.Status.InstanceID != nil {
+		return computeService.GetInstanceStatus(*openStackServer.Status.InstanceID)
+	}
+	return computeService.GetInstanceStatusByName(openStackServer, openStackServer.Name)
+}
+
+// getClusterFromMetadata returns the Cluster object (if present) using the object metadata.
+// This function was copied from the cluster-api project but manages errors differently.
+func getClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) {
+	// If the object is unlabeled, return early with no error.
+	// It's fine for this object to not be part of a cluster.
+	if obj.Labels[clusterv1.ClusterNameLabel] == "" {
+		return nil, nil
+	}
+	// At this point, the object has a cluster name label so we should be able to find the cluster
+	// and return an error if we can't.
+	return util.GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.ClusterNameLabel])
+}
+
+// reconcileFloatingAddressFromPool reconciles the floating IP address from the pool.
+// It returns the IPAddressClaim and a boolean indicating if the IPAddressClaim is ready.
+func (r *OpenStackServerReconciler) reconcileFloatingAddressFromPool(ctx context.Context, scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (*ipamv1.IPAddressClaim, bool, error) {
+	if openStackServer.Spec.FloatingIPPoolRef == nil {
+		return nil, false, nil
+	}
+	// The separate "var claim" declaration was redundant; := declares it here.
+	claim, err := r.getOrCreateIPAddressClaimForFloatingAddress(ctx, scope, openStackServer)
+	if err != nil {
+		conditions.MarkFalse(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityInfo, "Failed to reconcile floating IP claims: %v", err)
+		return nil, true, err
+	}
+	if claim.Status.AddressRef.Name == "" {
+		r.Recorder.Eventf(openStackServer, corev1.EventTypeNormal, "WaitingForIPAddressClaim", "Waiting for IPAddressClaim %s/%s to be allocated", claim.Namespace, claim.Name)
+		return claim, true, nil
+	}
+	conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition)
+	return claim, false, nil
+}
+
+// getOrCreateIPAddressClaimForFloatingAddress creates an IPAddressClaim for the FloatingAddressFromPool if it does not exist yet.
+func (r *OpenStackServerReconciler) getOrCreateIPAddressClaimForFloatingAddress(ctx context.Context, scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (*ipamv1.IPAddressClaim, error) {
+	poolRef := openStackServer.Spec.FloatingIPPoolRef
+	claimName := names.GetFloatingAddressClaimName(openStackServer.Name)
+	claim := &ipamv1.IPAddressClaim{}
+
+	err := r.Client.Get(ctx, client.ObjectKey{Namespace: openStackServer.Namespace, Name: claimName}, claim)
+	if err == nil {
+		// The claim already exists; nothing to create.
+		return claim, nil
+	} else if client.IgnoreNotFound(err) != nil {
+		return nil, err
+	}
+
+	claim = &ipamv1.IPAddressClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      claimName,
+			Namespace: openStackServer.Namespace,
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: openStackServer.APIVersion,
+					Kind:       openStackServer.Kind,
+					Name:       openStackServer.Name,
+					UID:        openStackServer.UID,
+				},
+			},
+			Finalizers: []string{infrav1.IPClaimMachineFinalizer},
+		},
+		Spec: ipamv1.IPAddressClaimSpec{
+			PoolRef: *poolRef,
+		},
+	}
+
+	// If the OpenStackServer has a ClusterNameLabel, set it on the IPAddressClaim as well.
+	// This is useful for garbage collection of IPAddressClaims when a Cluster is deleted.
+	// The Labels map must be created first: the claim literal above has no
+	// Labels, and assigning into a nil map panics at runtime.
+	if clusterName := openStackServer.ObjectMeta.Labels[clusterv1.ClusterNameLabel]; clusterName != "" {
+		claim.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: clusterName}
+	}
+
+	if err := r.Client.Create(ctx, claim); err != nil {
+		return nil, err
+	}
+
+	r.Recorder.Eventf(openStackServer, corev1.EventTypeNormal, "CreatingIPAddressClaim", "Creating IPAddressClaim %s/%s", claim.Namespace, claim.Name)
+	scope.Logger().Info("Created IPAddressClaim", "name", claim.Name)
+	return claim, nil
+}
+
+// associateIPAddressFromIPAddressClaim ensures the floating IP allocated to
+// the claim is associated with the instance's port on the external network.
+// It is a no-op when the instance already reports the address.
+func (r *OpenStackServerReconciler) associateIPAddressFromIPAddressClaim(ctx context.Context, openStackServer *infrav1alpha1.OpenStackServer, instanceStatus *compute.InstanceStatus, instanceNS *compute.InstanceNetworkStatus, claim *ipamv1.IPAddressClaim, networkingService *networking.Service) error {
+	address := &ipamv1.IPAddress{}
+	addressKey := client.ObjectKey{Namespace: openStackServer.Namespace, Name: claim.Status.AddressRef.Name}
+
+	if err := r.Client.Get(ctx, addressKey, address); err != nil {
+		return err
+	}
+
+	// Already associated: the instance reports the claimed address.
+	instanceAddresses := instanceNS.Addresses()
+	for _, instanceAddress := range instanceAddresses {
+		if instanceAddress.Address == address.Spec.Address {
+			conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition)
+			return nil
+		}
+	}
+
+	fip, err := networkingService.GetFloatingIP(address.Spec.Address)
+	if err != nil {
+		return err
+	}
+
+	if fip == nil {
+		conditions.MarkFalse(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "floating IP does not exist")
+		return fmt.Errorf("floating IP %q does not exist", address.Spec.Address)
+	}
+
+	port, err := networkingService.GetPortForExternalNetwork(instanceStatus.ID(), fip.FloatingNetworkID)
+	if err != nil {
+		return fmt.Errorf("get port for floating IP %q: %w", fip.FloatingIP, err)
+	}
+
+	if port == nil {
+		conditions.MarkFalse(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Can't find port for floating IP %q on external network %s", fip.FloatingIP, fip.FloatingNetworkID)
+		return fmt.Errorf("port for floating IP %q on network %s does not exist", fip.FloatingIP, fip.FloatingNetworkID)
+	}
+
+	if err = networkingService.AssociateFloatingIP(openStackServer, fip, port.ID); err != nil {
+		return err
+	}
+	conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition)
+	return nil
+}
+
+// reconcileDeleteFloatingAddressFromPool releases the server's IPAddressClaim
+// on deletion by removing our finalizer; actual claim deletion is presumably
+// left to owner-reference garbage collection — TODO confirm.
+func (r *OpenStackServerReconciler) reconcileDeleteFloatingAddressFromPool(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) error {
+	// NOTE(review): the log key "openStackMachine" looks copied from the
+	// machine controller; this reconciles an OpenStackServer.
+	log := scope.Logger().WithValues("openStackMachine", openStackServer.Name)
+	log.Info("Reconciling Machine delete floating address from pool")
+	if openStackServer.Spec.FloatingIPPoolRef == nil {
+		return nil
+	}
+	claimName := names.GetFloatingAddressClaimName(openStackServer.Name)
+	claim := &ipamv1.IPAddressClaim{}
+	// TODO(review): thread a ctx parameter through instead of context.Background().
+	if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: openStackServer.Namespace, Name: claimName}, claim); err != nil {
+		return client.IgnoreNotFound(err)
+	}
+
+	controllerutil.RemoveFinalizer(claim, infrav1.IPClaimMachineFinalizer)
+	return r.Client.Update(context.Background(), claim)
+}
diff --git a/controllers/openstackserver_controller_test.go b/controllers/openstackserver_controller_test.go
new file mode 100644
index 0000000000..adde365964
--- /dev/null
+++ b/controllers/openstackserver_controller_test.go
@@ -0,0 +1,543 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + "reflect" + "testing" + + "github.com/go-logr/logr/testr" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/portsbinding" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" + . 
"github.com/onsi/gomega" //nolint:revive + "go.uber.org/mock/gomock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" +) + +const ( + openStackServerName = "test-openstack-server" + instanceUUID = "8308882f-5e46-47e6-8e12-1fe869c43d1d" + portUUID = "55eac199-4836-4a98-b31c-9f65f382ad46" + rootVolumeUUID = "4724a66d-bd5e-47f3-bb57-a67fcb4168e0" + trunkUUID = "9d348baa-93b1-4e63-932f-dd0527fbd789" + imageName = "my-image" + defaultFlavor = "m1.small" +) + +type recorders struct { + compute *mock.MockComputeClientMockRecorder + image *mock.MockImageClientMockRecorder + network *mock.MockNetworkClientMockRecorder + volume *mock.MockVolumeClientMockRecorder +} + +var defaultImage = infrav1.ImageParam{ + Filter: &infrav1.ImageFilter{ + Name: ptr.To(imageName), + }, +} + +var defaultPortOpts = []infrav1.PortOpts{ + { + Network: &infrav1.NetworkParam{ + ID: ptr.To(networkUUID), + }, + }, +} + +var defaultResolvedPorts = []infrav1.ResolvedPortSpec{ + { + Name: openStackServerName + "-0", + NetworkID: networkUUID, + }, +} + +var defaultPortsStatus = []infrav1.PortStatus{ + { + ID: portUUID, + }, +} + +var getDefaultFlavor = func(r *recorders) { + f := flavors.Flavor{ + Name: defaultFlavor, + } + r.compute.GetFlavorFromName(defaultFlavor).Return(&f, nil) +} + +var createDefaultPort = func(r *recorders) { + createOpts := ports.CreateOpts{ + Name: openStackServerName + "-0", + NetworkID: networkUUID, + } + portsBuilder := portsbinding.CreateOptsExt{ + CreateOptsBuilder: createOpts, + } + r.network.CreatePort(portsBuilder).Return(&ports.Port{ + ID: portUUID, + }, nil) +} + +var createDefaultServer = func(r *recorders) { + // 
Mock any server creation + r.compute.CreateServer(gomock.Any(), gomock.Any()).Return(&servers.Server{ID: instanceUUID}, nil) +} + +var listDefaultPorts = func(r *recorders) { + r.network.ListPort(ports.ListOpts{ + Name: openStackServerName + "-0", + NetworkID: networkUUID, + }).Return([]ports.Port{ + { + ID: portUUID, + }, + }, nil) +} + +var listDefaultPortsNotFound = func(r *recorders) { + r.network.ListPort(ports.ListOpts{ + Name: openStackServerName + "-0", + NetworkID: networkUUID, + }).Return(nil, nil) +} + +var listDefaultServerNotFound = func(r *recorders) { + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return([]servers.Server{}, nil) +} + +var listDefaultServerFound = func(r *recorders) { + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return([]servers.Server{{ID: instanceUUID}}, nil) +} + +var deleteDefaultPorts = func(r *recorders) { + trunkExtension := extensions.Extension{} + trunkExtension.Alias = "trunk" + r.network.ListExtensions().Return([]extensions.Extension{trunkExtension}, nil) + r.network.ListTrunk(trunks.ListOpts{PortID: portUUID}).Return([]trunks.Trunk{{ID: trunkUUID}}, nil) + r.network.ListTrunkSubports(trunkUUID).Return([]trunks.Subport{}, nil) + r.network.DeleteTrunk(trunkUUID).Return(nil) + r.network.DeletePort(portUUID).Return(nil) +} + +var deleteServerByID = func(r *recorders) { + r.compute.GetServer(instanceUUID).Return(&servers.Server{ID: instanceUUID, Name: openStackServerName}, nil) + r.compute.DeleteServer(instanceUUID).Return(nil) + r.compute.GetServer(instanceUUID).Return(nil, gophercloud.ErrUnexpectedResponseCode{Actual: 404}) +} + +var deleteServerByName = func(r *recorders) { + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return([]servers.Server{{ID: instanceUUID, Name: openStackServerName}}, nil) + r.compute.DeleteServer(instanceUUID).Return(nil) + r.compute.GetServer(instanceUUID).Return(nil, 
gophercloud.ErrUnexpectedResponseCode{Actual: 404}) +} + +var deleteMissingServerByName = func(r *recorders) { + // Lookup server by name because it is not in status. + // Don't find it. + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return(nil, nil) +} + +var deleteRootVolume = func(r *recorders) { + // Fetch volume by name + volumeName := fmt.Sprintf("%s-root", openStackServerName) + r.volume.ListVolumes(volumes.ListOpts{ + AllTenants: false, + Name: volumeName, + TenantID: "", + }).Return([]volumes.Volume{{ + ID: rootVolumeUUID, + Name: volumeName, + }}, nil) + + // Delete volume + r.volume.DeleteVolume(rootVolumeUUID, volumes.DeleteOpts{}).Return(nil) +} + +func TestOpenStackServer_serverToInstanceSpec(t *testing.T) { + tests := []struct { + name string + openStackServer *infrav1alpha1.OpenStackServer + want *compute.InstanceSpec + wantErr bool + }{ + { + name: "Test serverToInstanceSpec without resolved resources", + openStackServer: &infrav1alpha1.OpenStackServer{}, + wantErr: true, + }, + { + name: "Test serverToInstanceSpec with resolved resources", + openStackServer: &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: infrav1alpha1.OpenStackServerSpec{ + AdditionalBlockDevices: []infrav1.AdditionalBlockDevice{ + { + Name: "block-device", + SizeGiB: 10, + Storage: infrav1.BlockDeviceStorage{ + Type: "ceph", + }, + }, + }, + AvailabilityZone: ptr.To("failure-domain"), + ConfigDrive: ptr.To(true), + Flavor: "large", + RootVolume: &infrav1.RootVolume{ + SizeGiB: 10, + BlockDeviceVolume: infrav1.BlockDeviceVolume{ + Type: "fast", + }, + }, + ServerMetadata: []infrav1.ServerMetadata{{Key: "key", Value: "value"}}, + SSHKeyName: "key", + Tags: []string{"tag1", "tag2"}, + Trunk: ptr.To(true), + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: "123", + ServerGroupID: "456", + }, + }, + }, + want: &compute.InstanceSpec{ + 
AdditionalBlockDevices: []infrav1.AdditionalBlockDevice{ + { + Name: "block-device", + SizeGiB: 10, + Storage: infrav1.BlockDeviceStorage{ + Type: "ceph", + }, + }, + }, + ConfigDrive: true, + FailureDomain: "failure-domain", + Flavor: "large", + ImageID: "123", + Metadata: map[string]string{ + "key": "value", + }, + Name: "test", + RootVolume: &infrav1.RootVolume{ + SizeGiB: 10, + BlockDeviceVolume: infrav1.BlockDeviceVolume{ + Type: "fast", + }, + }, + ServerGroupID: "456", + SSHKeyName: "key", + Tags: []string{"tag1", "tag2"}, + Trunk: true, + }, + }, + } + for i := range tests { + tt := tests[i] + t.Run(tt.name, func(t *testing.T) { + reconciler := OpenStackServerReconciler{} + spec, err := reconciler.serverToInstanceSpec(ctx, tt.openStackServer) + if (err != nil) != tt.wantErr { + t.Fatalf("serverToInstanceSpec() error = %+v, wantErr %+v", err, tt.wantErr) + } + if err == nil && !reflect.DeepEqual(spec, tt.want) { + t.Errorf("serverToInstanceSpec() got = %+v, want %+v", spec, tt.want) + } + }) + } +} + +func Test_OpenStackServerReconcileDelete(t *testing.T) { + tests := []struct { + name string + osServer infrav1alpha1.OpenStackServer + expect func(r *recorders) + wantErr bool + wantRemoveFinalizer bool + }{ + { + name: "No volumes, resolved and resources populated", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: defaultFlavor, + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + InstanceID: ptr.To(instanceUUID), + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + deleteServerByID(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "Root volume, resolved and resources populated", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + 
Image: defaultImage, + RootVolume: &infrav1.RootVolume{ + SizeGiB: 50, + }, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + InstanceID: ptr.To(instanceUUID), + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + // Server exists, so we don't delete root volume explicitly + deleteServerByID(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "Root volume, server not created, resolved and resources populated", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Image: defaultImage, + RootVolume: &infrav1.RootVolume{ + SizeGiB: 50, + }, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + deleteMissingServerByName(r) + deleteRootVolume(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "No instance id, server and ports exist", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + deleteServerByName(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "Adopt ports error should fail deletion and retry", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: 
infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + }, + }, + expect: func(r *recorders) { + r.network.ListPort(ports.ListOpts{ + NetworkID: networkUUID, + Name: openStackServerName + "-0", + }).Return(nil, fmt.Errorf("error adopting ports")) + }, + wantErr: true, + wantRemoveFinalizer: false, + }, + } + for i := range tests { + tt := &tests[i] + t.Run(tt.name, func(t *testing.T) { + g := NewGomegaWithT(t) + log := testr.New(t) + + mockCtrl := gomock.NewController(t) + mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "") + + reconciler := OpenStackServerReconciler{} + + computeRecorder := mockScopeFactory.ComputeClient.EXPECT() + imageRecorder := mockScopeFactory.ImageClient.EXPECT() + networkRecorder := mockScopeFactory.NetworkClient.EXPECT() + volumeRecorder := mockScopeFactory.VolumeClient.EXPECT() + + if tt.expect != nil { + tt.expect(&recorders{computeRecorder, imageRecorder, networkRecorder, volumeRecorder}) + } + scopeWithLogger := scope.NewWithLogger(mockScopeFactory, log) + + osServer := &tt.osServer + osServer.Name = openStackServerName + osServer.Finalizers = []string{infrav1alpha1.OpenStackServerFinalizer} + + err := reconciler.reconcileDelete(scopeWithLogger, &tt.osServer) + + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + if tt.wantRemoveFinalizer { + g.Expect(osServer.Finalizers).To(BeEmpty()) + } else { + g.Expect(osServer.Finalizers).To(ConsistOf(infrav1alpha1.OpenStackServerFinalizer)) + } + }) + } +} + +func Test_OpenStackServerReconcileCreate(t *testing.T) { + tests := []struct { + name string + osServer infrav1alpha1.OpenStackServer + expect func(r *recorders) + }{ + { + name: "Minimal server spec creating port and server", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: defaultFlavor, + Image: defaultImage, + Ports: defaultPortOpts, + }, + 
Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + }, + }, + expect: func(r *recorders) { + listDefaultPortsNotFound(r) + createDefaultPort(r) + getDefaultFlavor(r) + listDefaultServerNotFound(r) + createDefaultServer(r) + }, + }, + { + name: "Minimum server spec adopting port and server", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: defaultFlavor, + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + }, + }, + expect: func(r *recorders) { + listDefaultPorts(r) + listDefaultServerFound(r) + }, + }, + } + for i := range tests { + tt := &tests[i] + t.Run(tt.name, func(t *testing.T) { + g := NewGomegaWithT(t) + log := testr.New(t) + + mockCtrl := gomock.NewController(t) + mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "") + + reconciler := OpenStackServerReconciler{} + + computeRecorder := mockScopeFactory.ComputeClient.EXPECT() + imageRecorder := mockScopeFactory.ImageClient.EXPECT() + networkRecorder := mockScopeFactory.NetworkClient.EXPECT() + volumeRecorder := mockScopeFactory.VolumeClient.EXPECT() + + if tt.expect != nil { + tt.expect(&recorders{computeRecorder, imageRecorder, networkRecorder, volumeRecorder}) + } + scopeWithLogger := scope.NewWithLogger(mockScopeFactory, log) + + osServer := &tt.osServer + osServer.Name = openStackServerName + osServer.Finalizers = []string{infrav1alpha1.OpenStackServerFinalizer} + + _, err := reconciler.reconcileNormal(ctx, scopeWithLogger, &tt.osServer) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 26d9c9a123..c67d16916f 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -18,29 +18,23 @@ package controllers import ( 
"context" - "errors" "path/filepath" "testing" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" . "github.com/onsi/ginkgo/v2" //nolint:revive . "github.com/onsi/gomega" //nolint:revive - "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" "sigs.k8s.io/cluster-api-provider-openstack/test/helpers/external" ) @@ -81,6 +75,9 @@ var _ = BeforeSuite(func() { err = infrav1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = infrav1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + framework.TryAddDefaultSchemes(scheme.Scheme) // +kubebuilder:scaffold:scheme @@ -124,61 +121,3 @@ var _ = Describe("EnvTest sanity check", func() { // will actually stay in "Terminating" state and never be completely gone. 
}) }) - -var _ = Describe("When calling getOrCreate", func() { - logger := GinkgoLogr - - var ( - reconsiler OpenStackMachineReconciler - mockCtrl *gomock.Controller - mockScopeFactory *scope.MockScopeFactory - computeService *compute.Service - err error - ) - - BeforeEach(func() { - ctx = context.Background() - reconsiler = OpenStackMachineReconciler{} - mockCtrl = gomock.NewController(GinkgoT()) - mockScopeFactory = scope.NewMockScopeFactory(mockCtrl, "1234") - computeService, err = compute.NewService(scope.NewWithLogger(mockScopeFactory, logger)) - Expect(err).NotTo(HaveOccurred()) - }) - - It("should return an error if unable to get instance", func() { - openStackCluster := &infrav1.OpenStackCluster{} - machine := &clusterv1.Machine{} - openStackMachine := &infrav1.OpenStackMachine{ - Status: infrav1.OpenStackMachineStatus{ - InstanceID: ptr.To("machine-uuid"), - }, - } - - mockScopeFactory.ComputeClient.EXPECT().GetServer(gomock.Any()).Return(nil, errors.New("Test error when getting server")) - instanceStatus, err := reconsiler.getOrCreateInstance(logger, openStackCluster, machine, openStackMachine, computeService, "", []string{}) - Expect(err).To(HaveOccurred()) - Expect(instanceStatus).To(BeNil()) - conditions := openStackMachine.GetConditions() - Expect(len(conditions) > 0).To(BeTrue()) - for i := range conditions { - if conditions[i].Type == infrav1.InstanceReadyCondition { - Expect(conditions[i].Reason).To(Equal(infrav1.OpenStackErrorReason)) - break - } - } - }) - - It("should retrieve instance by name if no ID is stored", func() { - openStackCluster := &infrav1.OpenStackCluster{} - machine := &clusterv1.Machine{} - openStackMachine := &infrav1.OpenStackMachine{} - servers := make([]servers.Server, 1) - servers[0].ID = "machine-uuid" - - mockScopeFactory.ComputeClient.EXPECT().ListServers(gomock.Any()).Return(servers, nil) - instanceStatus, err := reconsiler.getOrCreateInstance(logger, openStackCluster, machine, openStackMachine, computeService, "", 
[]string{}) - Expect(err).ToNot(HaveOccurred()) - Expect(instanceStatus).ToNot(BeNil()) - Expect(instanceStatus.ID()).To(Equal("machine-uuid")) - }) -}) diff --git a/docs/book/src/api/v1alpha1/api.md b/docs/book/src/api/v1alpha1/api.md index fa456a74f9..3e7e1e0047 100644 --- a/docs/book/src/api/v1alpha1/api.md +++ b/docs/book/src/api/v1alpha1/api.md @@ -3,7 +3,287 @@
package v1alpha1 contains API Schema definitions for the infrastructure v1alpha1 API group
Resource Types: -+
OpenStackServer is the Schema for the openstackservers API.
+ +| Field | +Description | +||||||||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
+apiVersion+string |
+
+
+infrastructure.cluster.x-k8s.io/v1alpha1
+
+ |
+||||||||||||||||||||||||||||||||
+kind+string + |
+OpenStackServer |
+||||||||||||||||||||||||||||||||
+metadata+ +Kubernetes meta/v1.ObjectMeta + + |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+||||||||||||||||||||||||||||||||
+spec+ + +OpenStackServerSpec + + + |
+
+ + +
|
+||||||||||||||||||||||||||||||||
+status+ + +OpenStackServerStatus + + + |
++ | +
@@ -287,28 +567,469 @@ sigs.k8s.io/cluster-api/api/v1beta1.Conditions -
string alias)(Appears on: -OpenStackFloatingIPPoolSpec) +OpenStackServer)
-
ReclaimPolicy is a string type alias to represent reclaim policies for floating ips.
+OpenStackServerSpec defines the desired state of OpenStackServer.
| Value | +Field | Description |
|---|---|---|
"Delete" |
-ReclaimDelete is the reclaim policy for floating ips. + | |
+additionalBlockDevices+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.AdditionalBlockDevice + + |
-||
"Retain" |
-ReclaimRetain is the reclaim policy for floating ips. + |
+(Optional)
+ AdditionalBlockDevices is a list of specifications for additional block devices to attach to the server instance. + |
+
+availabilityZone+ +string + + |
+
+(Optional)
+ AvailabilityZone is the availability zone in which to create the server instance. + |
+|
+configDrive+ +bool + + |
+
+(Optional)
+ ConfigDrive is a flag to enable config drive for the server instance. + |
+|
+flavor+ +string + + |
+
+ The flavor reference for the flavor for the server instance. |
+|
+floatingIPPoolRef+ +Kubernetes core/v1.TypedLocalObjectReference + + |
+
+(Optional)
+ FloatingIPPoolRef is a reference to a FloatingIPPool to allocate a floating IP from. + |
+|
+identityRef+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.OpenStackIdentityReference + + + |
+
+ IdentityRef is a reference to a secret holding OpenStack credentials. + |
+|
+image+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ImageParam + + + |
+
+ The image to use for the server instance. + |
+|
+ports+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortOpts + + + |
+
+ Ports to be attached to the server instance. + |
+|
+rootVolume+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.RootVolume + + + |
+
+(Optional)
+ RootVolume is the specification for the root volume of the server instance. + |
+|
+sshKeyName+ +string + + |
+
+ SSHKeyName is the name of the SSH key to inject in the instance. + |
+|
+securityGroups+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.SecurityGroupParam + + + |
+
+(Optional)
+ SecurityGroups is a list of security groups names to assign to the instance. + |
+|
+serverGroup+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerGroupParam + + + |
+
+(Optional)
+ ServerGroup is the server group to which the server instance belongs. + |
+|
+serverMetadata+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerMetadata + + + |
+
+(Optional)
+ ServerMetadata is a map of key value pairs to add to the server instance. + |
+|
+tags+ +[]string + + |
+
+ Tags which will be added to the machine and all dependent resources +which support them. These are in addition to Tags defined on the +cluster. +Requires Nova api 2.52 minimum! + |
+|
+trunk+ +bool + + |
+
+(Optional)
+ Trunk is a flag to indicate if the server instance is created on a trunk port or not. + |
+|
+userDataRef+ +Kubernetes core/v1.LocalObjectReference + + |
+
+(Optional)
+ UserDataRef is a reference to a secret containing the user data to +be injected into the server instance. + |
+
+(Appears on: +OpenStackServer) +
++
OpenStackServerStatus defines the observed state of OpenStackServer.
+ +| Field | +Description | +
|---|---|
+ready+ +bool + + |
+
+ Ready is true when the OpenStack server is ready. + |
+
+instanceID+ +string + + |
+
+(Optional)
+ InstanceID is the ID of the server instance. + |
+
+instanceState+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.InstanceState + + + |
+
+(Optional)
+ InstanceState is the state of the server instance. + |
+
+addresses+ +[]Kubernetes core/v1.NodeAddress + + |
+
+(Optional)
+ Addresses is the list of addresses of the server instance. + |
+
+resolved+ + +ResolvedServerSpec + + + |
+
+(Optional)
+ Resolved contains parts of the machine spec with all external +references fully resolved. + |
+
+resources+ + +ServerResources + + + |
+
+(Optional)
+ Resources contains references to OpenStack resources created for the machine. + |
+
+conditions+ + +sigs.k8s.io/cluster-api/api/v1beta1.Conditions + + + |
+
+(Optional)
+ Conditions defines current service state of the OpenStackServer. + |
+
string alias)+(Appears on: +OpenStackFloatingIPPoolSpec) +
++
ReclaimPolicy is a string type alias to represent reclaim policies for floating ips.
+ +| Value | +Description | +
|---|---|
"Delete" |
+ReclaimDelete is the reclaim policy for floating ips. + |
+
"Retain" |
+ReclaimRetain is the reclaim policy for floating ips. + |
+
+(Appears on: +OpenStackServerStatus) +
++
ResolvedServerSpec contains resolved references to resources required by the server.
+ +| Field | +Description | +
|---|---|
+serverGroupID+ +string + + |
+
+(Optional)
+ ServerGroupID is the ID of the server group the server should be added to and is calculated based on ServerGroupFilter. + |
+
+imageID+ +string + + |
+
+(Optional)
+ ImageID is the ID of the image to use for the server and is calculated based on ImageFilter. + |
+
+ports+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ResolvedPortSpec + + + |
+
+(Optional)
+ Ports is the fully resolved list of ports to create for the server. + |
+
+(Appears on: +OpenStackServerStatus) +
++
ServerResources contains references to OpenStack resources created for the server.
+ +| Field | +Description | +
|---|---|
+ports+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortStatus + + + |
+
+(Optional)
+ Ports is the status of the ports created for the server. + |
+
string alias)+
+| Value | +Description | +
|---|---|
"CreateError" |
+