diff --git a/PROJECT b/PROJECT index 9db67d8145..26589c0275 100644 --- a/PROJECT +++ b/PROJECT @@ -44,4 +44,7 @@ resources: - group: infrastructure kind: OpenStackFloatingIPPool version: v1alpha1 +- group: infrastructure + kind: OpenStackServer + version: v1alpha1 version: "2" diff --git a/api/v1alpha1/conditions_consts.go b/api/v1alpha1/conditions_consts.go index e40c728ff8..018e580ad2 100644 --- a/api/v1alpha1/conditions_consts.go +++ b/api/v1alpha1/conditions_consts.go @@ -16,6 +16,8 @@ limitations under the License. package v1alpha1 +type ServerStatusError string + const ( // OpenstackFloatingIPPoolReadyCondition reports on the current status of the floating ip pool. Ready indicates that the pool is ready to be used. OpenstackFloatingIPPoolReadyCondition = "OpenstackFloatingIPPoolReadyCondition" @@ -25,4 +27,6 @@ const ( // UnableToFindFloatingIPNetworkReason is used when the floating ip network is not found. UnableToFindNetwork = "UnableToFindNetwork" + + CreateServerError ServerStatusError = "CreateError" ) diff --git a/api/v1alpha1/openstackserver_types.go b/api/v1alpha1/openstackserver_types.go new file mode 100644 index 0000000000..c82e27b0b8 --- /dev/null +++ b/api/v1alpha1/openstackserver_types.go @@ -0,0 +1,188 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/optional" +) + +const ( + // OpenStackServerFinalizer allows ReconcileOpenStackServer to clean up resources associated with OpenStackServer before + // removing it from the apiserver. + OpenStackServerFinalizer = "openstackserver.infrastructure.cluster.x-k8s.io" +) + +// OpenStackServerSpec defines the desired state of OpenStackServer. +type OpenStackServerSpec struct { + // AdditionalBlockDevices is a list of specifications for additional block devices to attach to the server instance. + // +listType=map + // +listMapKey=name + // +optional + AdditionalBlockDevices []infrav1.AdditionalBlockDevice `json:"additionalBlockDevices,omitempty"` + + // AvailabilityZone is the availability zone in which to create the server instance. + //+optional + AvailabilityZone optional.String `json:"availabilityZone,omitempty"` + + // ConfigDrive is a flag to enable config drive for the server instance. + // +optional + ConfigDrive optional.Bool `json:"configDrive,omitempty"` + + // The flavor reference for the flavor for the server instance. + // +required + Flavor string `json:"flavor"` + + // FloatingIPPoolRef is a reference to a FloatingIPPool to allocate a floating IP from. + // +optional + FloatingIPPoolRef *corev1.TypedLocalObjectReference `json:"floatingIPPoolRef,omitempty"` + + // IdentityRef is a reference to a secret holding OpenStack credentials. + // +required + IdentityRef infrav1.OpenStackIdentityReference `json:"identityRef"` + + // The image to use for the server instance. + // +required + Image infrav1.ImageParam `json:"image"` + + // Ports to be attached to the server instance. 
+ // +required + Ports []infrav1.PortOpts `json:"ports"` + + // RootVolume is the specification for the root volume of the server instance. + // +optional + RootVolume *infrav1.RootVolume `json:"rootVolume,omitempty"` + + // SSHKeyName is the name of the SSH key to inject in the instance. + // +required + SSHKeyName string `json:"sshKeyName"` + + // SecurityGroups is a list of security groups names to assign to the instance. + // +optional + SecurityGroups []infrav1.SecurityGroupParam `json:"securityGroups,omitempty"` + + // ServerGroup is the server group to which the server instance belongs. + // +optional + ServerGroup *infrav1.ServerGroupParam `json:"serverGroup,omitempty"` + + // ServerMetadata is a map of key value pairs to add to the server instance. + // +listType=map + // +listMapKey=key + // +optional + ServerMetadata []infrav1.ServerMetadata `json:"serverMetadata,omitempty"` + + // Tags which will be added to the machine and all dependent resources + // which support them. These are in addition to Tags defined on the + // cluster. + // Requires Nova api 2.52 minimum! + // +listType=set + Tags []string `json:"tags,omitempty"` + + // Trunk is a flag to indicate if the server instance is created on a trunk port or not. + // +optional + Trunk optional.Bool `json:"trunk,omitempty"` + + // UserDataRef is a reference to a secret containing the user data to + // be injected into the server instance. + // +optional + UserDataRef *corev1.LocalObjectReference `json:"userDataRef,omitempty"` +} + +// OpenStackServerStatus defines the observed state of OpenStackServer. +type OpenStackServerStatus struct { + // Ready is true when the OpenStack server is ready. + // +kubebuilder:default=false + Ready bool `json:"ready"` + + // InstanceID is the ID of the server instance. + // +optional + InstanceID optional.String `json:"instanceID,omitempty"` + + // InstanceState is the state of the server instance. 
+ // +optional + InstanceState *infrav1.InstanceState `json:"instanceState,omitempty"` + + // Addresses is the list of addresses of the server instance. + // +optional + Addresses []corev1.NodeAddress `json:"addresses,omitempty"` + + // Resolved contains parts of the machine spec with all external + // references fully resolved. + // +optional + Resolved *ResolvedServerSpec `json:"resolved,omitempty"` + + // Resources contains references to OpenStack resources created for the machine. + // +optional + Resources *ServerResources `json:"resources,omitempty"` + + // Conditions defines current service state of the OpenStackServer. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:path=openstackservers,scope=Namespaced,categories=cluster-api,shortName=oss +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="InstanceState",type="string",JSONPath=".status.instanceState",description="OpenStack instance state" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="OpenStack instance ready status" +// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".status.instanceID",description="OpenStack instance ID" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of OpenStack instance" + +// OpenStackServer is the Schema for the openstackservers API. +type OpenStackServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OpenStackServerSpec `json:"spec,omitempty"` + Status OpenStackServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OpenStackServerList contains a list of OpenStackServer. 
+type OpenStackServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OpenStackServer `json:"items"` +} + +// GetConditions returns the observations of the operational state of the OpenStackServer resource. +func (r *OpenStackServer) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the OpenStackServer to the predescribed clusterv1.Conditions. +func (r *OpenStackServer) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} + +var _ infrav1.IdentityRefProvider = &OpenStackServer{} + +// GetIdentityRef returns the Server's namespace and IdentityRef. +func (r *OpenStackServer) GetIdentityRef() (*string, *infrav1.OpenStackIdentityReference) { + return &r.Namespace, &r.Spec.IdentityRef +} + +func init() { + SchemeBuilder.Register(&OpenStackServer{}, &OpenStackServerList{}) +} diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go new file mode 100644 index 0000000000..2f339adaae --- /dev/null +++ b/api/v1alpha1/types.go @@ -0,0 +1,43 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" +) + +// ResolvedServerSpec contains resolved references to resources required by the server. 
+type ResolvedServerSpec struct { + // ServerGroupID is the ID of the server group the server should be added to and is calculated based on ServerGroupFilter. + // +optional + ServerGroupID string `json:"serverGroupID,omitempty"` + + // ImageID is the ID of the image to use for the server and is calculated based on ImageFilter. + // +optional + ImageID string `json:"imageID,omitempty"` + + // Ports is the fully resolved list of ports to create for the server. + // +optional + Ports []infrav1.ResolvedPortSpec `json:"ports,omitempty"` +} + +// ServerResources contains references to OpenStack resources created for the server. +type ServerResources struct { + // Ports is the status of the ports created for the server. + // +optional + Ports []infrav1.PortStatus `json:"ports,omitempty"` +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 1dc4b7d3d1..55a7485a71 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -157,3 +158,234 @@ func (in *OpenStackFloatingIPPoolStatus) DeepCopy() *OpenStackFloatingIPPoolStat in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackServer) DeepCopyInto(out *OpenStackServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServer. 
+func (in *OpenStackServer) DeepCopy() *OpenStackServer { + if in == nil { + return nil + } + out := new(OpenStackServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackServerList) DeepCopyInto(out *OpenStackServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerList. +func (in *OpenStackServerList) DeepCopy() *OpenStackServerList { + if in == nil { + return nil + } + out := new(OpenStackServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackServerSpec) DeepCopyInto(out *OpenStackServerSpec) { + *out = *in + if in.AdditionalBlockDevices != nil { + in, out := &in.AdditionalBlockDevices, &out.AdditionalBlockDevices + *out = make([]v1beta1.AdditionalBlockDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailabilityZone != nil { + in, out := &in.AvailabilityZone, &out.AvailabilityZone + *out = new(string) + **out = **in + } + if in.ConfigDrive != nil { + in, out := &in.ConfigDrive, &out.ConfigDrive + *out = new(bool) + **out = **in + } + if in.FloatingIPPoolRef != nil { + in, out := &in.FloatingIPPoolRef, &out.FloatingIPPoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + out.IdentityRef = in.IdentityRef + in.Image.DeepCopyInto(&out.Image) + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1beta1.PortOpts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(v1beta1.RootVolume) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]v1beta1.SecurityGroupParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerGroup != nil { + in, out := &in.ServerGroup, &out.ServerGroup + *out = new(v1beta1.ServerGroupParam) + (*in).DeepCopyInto(*out) + } + if in.ServerMetadata != nil { + in, out := &in.ServerMetadata, &out.ServerMetadata + *out = make([]v1beta1.ServerMetadata, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Trunk != nil { + in, out := &in.Trunk, &out.Trunk + *out = new(bool) + **out = **in + } + if in.UserDataRef != nil { + in, out := &in.UserDataRef, &out.UserDataRef + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new OpenStackServerSpec. +func (in *OpenStackServerSpec) DeepCopy() *OpenStackServerSpec { + if in == nil { + return nil + } + out := new(OpenStackServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackServerStatus) DeepCopyInto(out *OpenStackServerStatus) { + *out = *in + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(v1beta1.InstanceState) + **out = **in + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1.NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Resolved != nil { + in, out := &in.Resolved, &out.Resolved + *out = new(ResolvedServerSpec) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ServerResources) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(apiv1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerStatus. +func (in *OpenStackServerStatus) DeepCopy() *OpenStackServerStatus { + if in == nil { + return nil + } + out := new(OpenStackServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResolvedServerSpec) DeepCopyInto(out *ResolvedServerSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1beta1.ResolvedPortSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolvedServerSpec. +func (in *ResolvedServerSpec) DeepCopy() *ResolvedServerSpec { + if in == nil { + return nil + } + out := new(ResolvedServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerResources) DeepCopyInto(out *ServerResources) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1beta1.PortStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerResources. +func (in *ServerResources) DeepCopy() *ServerResources { + if in == nil { + return nil + } + out := new(ServerResources) + in.DeepCopyInto(out) + return out +} diff --git a/api_violations.report b/api_violations.report index 3482cb6c0e..52c6766e82 100644 --- a/api_violations.report +++ b/api_violations.report @@ -97,6 +97,11 @@ API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackFloatingIPPoolStatus,AvailableIPs API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackFloatingIPPoolStatus,ClaimedIPs API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackFloatingIPPoolStatus,FailedIPs +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackServerSpec,Ports +API rule violation: 
list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackServerSpec,SecurityGroups +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,OpenStackServerStatus,Addresses +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,ResolvedServerSpec,Ports +API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1,ServerResources,Ports API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6,APIServerLoadBalancer,AdditionalPorts API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6,APIServerLoadBalancer,AllowedCIDRs API rule violation: list_type_missing,sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6,Instance,Networks diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml new file mode 100644 index 0000000000..c0a3a4c625 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml @@ -0,0 +1,1128 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: openstackservers.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: OpenStackServer + listKind: OpenStackServerList + plural: openstackservers + shortNames: + - oss + singular: openstackserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: OpenStack instance state + jsonPath: .status.instanceState + name: InstanceState + type: string + - description: OpenStack instance ready status + jsonPath: .status.ready + name: Ready + type: string + - description: OpenStack instance ID + jsonPath: .status.instanceID + name: InstanceID + type: string + - description: Time 
duration since creation of OpenStack instance + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: OpenStackServer is the Schema for the openstackservers API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpenStackServerSpec defines the desired state of OpenStackServer. + properties: + additionalBlockDevices: + description: AdditionalBlockDevices is a list of specifications for + additional block devices to attach to the server instance. + items: + description: AdditionalBlockDevice is a block device to attach to + the server. + properties: + name: + description: |- + Name of the block device in the context of a machine. + If the block device is a volume, the Cinder volume will be named + as a combination of the machine name and this name. + Also, this name will be used for tagging the block device. + Information about the block device tag can be obtained from the OpenStack + metadata API or the config drive. + Name cannot be 'root', which is reserved for the root volume. + type: string + sizeGiB: + description: SizeGiB is the size of the block device in gibibytes + (GiB). 
+ minimum: 1 + type: integer + storage: + description: |- + Storage specifies the storage type of the block device and + additional storage options. + properties: + type: + description: |- + Type is the type of block device to create. + This can be either "Volume" or "Local". + type: string + volume: + description: Volume contains additional storage options + for a volume block device. + properties: + availabilityZone: + description: |- + AvailabilityZone is the volume availability zone to create the volume + in. If not specified, the volume will be created without an explicit + availability zone. + properties: + from: + default: Name + description: |- + From specifies where we will obtain the availability zone for the + volume. The options are "Name" and "Machine". If "Name" is specified + then the Name field must also be specified. If "Machine" is specified + the volume will use the value of FailureDomain, if any, from the + associated Machine. + enum: + - Name + - Machine + type: string + name: + description: |- + Name is the name of a volume availability zone to use. It is required + if From is "Name". The volume availability zone name may not contain + spaces. + minLength: 1 + pattern: ^[^ ]+$ + type: string + type: object + x-kubernetes-validations: + - message: name is required when from is 'Name' or default + rule: '!has(self.from) || self.from == ''Name'' ? + has(self.name) : !has(self.name)' + type: + description: |- + Type is the Cinder volume type of the volume. + If omitted, the default Cinder volume type that is configured in the OpenStack cloud + will be used. + type: string + type: object + required: + - type + type: object + required: + - name + - sizeGiB + - storage + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + availabilityZone: + description: AvailabilityZone is the availability zone in which to + create the server instance. 
+ type: string + configDrive: + description: ConfigDrive is a flag to enable config drive for the + server instance. + type: boolean + flavor: + description: The flavor reference for the flavor for the server instance. + type: string + floatingIPPoolRef: + description: FloatingIPPoolRef is a reference to a FloatingIPPool + to allocate a floating IP from. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + identityRef: + description: IdentityRef is a reference to a secret holding OpenStack + credentials. + properties: + cloudName: + description: CloudName specifies the name of the entry in the + clouds.yaml file to use. + type: string + name: + description: |- + Name is the name of a secret in the same namespace as the resource being provisioned. + The secret must contain a key named `clouds.yaml` which contains an OpenStack clouds.yaml file. + The secret may optionally contain a key named `cacert` containing a PEM-encoded CA certificate. + type: string + required: + - cloudName + - name + type: object + image: + description: The image to use for the server instance. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: |- + Filter describes a query for an image. If specified, the combination + of name and tags must return a single matching image or an error will + be raised. + minProperties: 1 + properties: + name: + description: The name of the desired image. If specified, + the combination of name and tags must return a single matching + image or an error will be raised. 
+ type: string + tags: + description: The tags associated with the desired image. If + specified, the combination of name and tags must return + a single matching image or an error will be raised. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the uuid of the image. ID will not be validated + before use. + format: uuid + type: string + type: object + ports: + description: Ports to be attached to the server instance. + items: + properties: + adminStateUp: + description: AdminStateUp specifies whether the port should + be created in the up (true) or down (false) state. The default + is up. + type: boolean + allowedAddressPairs: + description: |- + AllowedAddressPairs is a list of address pairs which Neutron will + allow the port to send traffic from in addition to the port's + addresses. If not specified, the MAC Address will be the MAC Address + of the port. Depending on the configuration of Neutron, it may be + supported to specify a CIDR instead of a specific IP address. + items: + properties: + ipAddress: + description: |- + IPAddress is the IP address of the allowed address pair. Depending on + the configuration of Neutron, it may be supported to specify a CIDR + instead of a specific IP address. + type: string + macAddress: + description: |- + MACAddress is the MAC address of the allowed address pair. If not + specified, the MAC address will be the MAC address of the port. + type: string + required: + - ipAddress + type: object + type: array + description: + description: Description is a human-readable description for + the port. + type: string + disablePortSecurity: + description: |- + DisablePortSecurity enables or disables the port security when set. + When not set, it takes the value of the corresponding field at the network level. + type: boolean + fixedIPs: + description: FixedIPs is a list of pairs of subnet and/or IP + address to assign to the port. 
If specified, these must be + subnets of the port's network. + items: + properties: + ipAddress: + description: |- + IPAddress is a specific IP address to assign to the port. If Subnet + is also specified, IPAddress must be a valid IP address in the + subnet. If Subnet is not specified, IPAddress must be a valid IP + address in any subnet of the port's network. + type: string + subnet: + description: |- + Subnet is an openstack subnet query that will return the id of a subnet to create + the fixed IP of a port in. This query must not return more than one subnet. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a filter to select the + subnet. It must match exactly one subnet. + minProperties: 1 + properties: + cidr: + type: string + description: + type: string + gatewayIP: + type: string + ipVersion: + type: integer + ipv6AddressMode: + type: string + ipv6RAMode: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. 
+ items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the uuid of the subnet. It will + not be validated. + format: uuid + type: string + type: object + type: object + type: array + x-kubernetes-list-type: atomic + hostID: + description: HostID specifies the ID of the host where the port + resides. + type: string + macAddress: + description: MACAddress specifies the MAC address of the port. + If not specified, the MAC address will be generated. + type: string + nameSuffix: + description: NameSuffix will be appended to the name of the + port if specified. If unspecified, instead the 0-based index + of the port in the list is used. + type: string + network: + description: |- + Network is a query for an openstack network that the port will be created or discovered on. + This will fail if the query returns more than one network. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a filter to select an OpenStack + network. If provided, cannot be empty. + minProperties: 1 + properties: + description: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. 
+ minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the ID of the network to use. If ID is + provided, the other filters cannot be provided. Must be + in UUID format. + format: uuid + type: string + type: object + profile: + description: |- + Profile is a set of key-value pairs that are used for binding + details. We intentionally don't expose this as a map[string]string + because we only want to enable the users to set the values of the + keys that are known to work in OpenStack Networking API. 
See + https://docs.openstack.org/api-ref/network/v2/index.html?expanded=create-port-detail#create-port + To set profiles, your tenant needs permissions rule:create_port, and + rule:create_port:binding:profile + properties: + ovsHWOffload: + description: |- + OVSHWOffload enables or disables the OVS hardware offload feature. + This flag is not required on OpenStack clouds since Yoga as Nova will set it automatically when the port is attached. + See: https://bugs.launchpad.net/nova/+bug/2020813 + type: boolean + trustedVF: + description: TrustedVF enables or disables the “trusted + mode” for the VF. + type: boolean + type: object + propagateUplinkStatus: + description: PropageteUplinkStatus enables or disables the propagate + uplink status on the port. + type: boolean + securityGroups: + description: SecurityGroups is a list of the names, uuids, filters + or any combination these of the security groups to assign + to the instance. + items: + description: SecurityGroupParam specifies an OpenStack security + group. It may be specified by ID or filter, but not both. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a query to select an OpenStack + security group. If provided, cannot be empty. + minProperties: 1 + properties: + description: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. 
+ It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the ID of the security group to use. + If ID is provided, the other filters cannot be provided. + Must be in UUID format. + format: uuid + type: string + type: object + type: array + x-kubernetes-list-type: atomic + tags: + description: |- + Tags applied to the port (and corresponding trunk, if a trunk is configured.) + These tags are applied in addition to the instance's tags, which will also be applied to the port. + items: + type: string + type: array + x-kubernetes-list-type: set + trunk: + description: |- + Trunk specifies whether trunking is enabled at the port level. If not + provided the value is inherited from the machine, or false for a + bastion host. + type: boolean + valueSpecs: + description: |- + Value specs are extra parameters to include in the API request with OpenStack. + This is an extension point for the API, so what they do and if they are supported, + depends on the specific OpenStack implementation. 
+ items: + description: ValueSpec represents a single value_spec key-value + pair. + properties: + key: + description: Key is the key in the key-value pair. + type: string + name: + description: |- + Name is the name of the key-value pair. + This is just for identifying the pair and will not be sent to the OpenStack API. + type: string + value: + description: Value is the value in the key-value pair. + type: string + required: + - key + - name + - value + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vnicType: + description: |- + VNICType specifies the type of vNIC which this port should be + attached to. This is used to determine which mechanism driver(s) to + be used to bind the port. The valid values are normal, macvtap, + direct, baremetal, direct-physical, virtio-forwarder, smart-nic and + remote-managed, although these values will not be validated in this + API to ensure compatibility with future neutron changes or custom + implementations. What type of vNIC is actually available depends on + deployments. If not specified, the Neutron default value is used. + type: string + type: object + type: array + rootVolume: + description: RootVolume is the specification for the root volume of + the server instance. + properties: + availabilityZone: + description: |- + AvailabilityZone is the volume availability zone to create the volume + in. If not specified, the volume will be created without an explicit + availability zone. + properties: + from: + default: Name + description: |- + From specifies where we will obtain the availability zone for the + volume. The options are "Name" and "Machine". If "Name" is specified + then the Name field must also be specified. If "Machine" is specified + the volume will use the value of FailureDomain, if any, from the + associated Machine. + enum: + - Name + - Machine + type: string + name: + description: |- + Name is the name of a volume availability zone to use. 
It is required + if From is "Name". The volume availability zone name may not contain + spaces. + minLength: 1 + pattern: ^[^ ]+$ + type: string + type: object + x-kubernetes-validations: + - message: name is required when from is 'Name' or default + rule: '!has(self.from) || self.from == ''Name'' ? has(self.name) + : !has(self.name)' + sizeGiB: + description: SizeGiB is the size of the block device in gibibytes + (GiB). + minimum: 1 + type: integer + type: + description: |- + Type is the Cinder volume type of the volume. + If omitted, the default Cinder volume type that is configured in the OpenStack cloud + will be used. + type: string + required: + - sizeGiB + type: object + securityGroups: + description: SecurityGroups is a list of security groups names to + assign to the instance. + items: + description: SecurityGroupParam specifies an OpenStack security + group. It may be specified by ID or filter, but not both. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a query to select an OpenStack + security group. If provided, cannot be empty. + minProperties: 1 + properties: + description: + type: string + name: + type: string + notTags: + description: |- + NotTags is a list of tags to filter by. If specified, resources which + contain all of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + notTagsAny: + description: |- + NotTagsAny is a list of tags to filter by. If specified, resources + which contain any of the given tags will be excluded from the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. 
+ minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + projectID: + type: string + tags: + description: |- + Tags is a list of tags to filter by. If specified, the resource must + have all of the tags specified to be included in the result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + tagsAny: + description: |- + TagsAny is a list of tags to filter by. If specified, the resource + must have at least one of the tags specified to be included in the + result. + items: + description: |- + NeutronTag represents a tag on a Neutron resource. + It may not be empty and may not contain commas. + minLength: 1 + pattern: ^[^,]+$ + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: ID is the ID of the security group to use. If ID + is provided, the other filters cannot be provided. Must be + in UUID format. + format: uuid + type: string + type: object + type: array + serverGroup: + description: ServerGroup is the server group to which the server instance + belongs. + maxProperties: 1 + minProperties: 1 + properties: + filter: + description: Filter specifies a query to select an OpenStack server + group. If provided, it cannot be empty. + minProperties: 1 + properties: + name: + description: Name is the name of a server group to look for. + type: string + type: object + id: + description: ID is the ID of the server group to use. + format: uuid + type: string + type: object + serverMetadata: + description: ServerMetadata is a map of key value pairs to add to + the server instance. 
+ items: + properties: + key: + description: Key is the server metadata key + maxLength: 255 + type: string + value: + description: Value is the server metadata value + maxLength: 255 + type: string + required: + - key + - value + type: object + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + sshKeyName: + description: SSHKeyName is the name of the SSH key to inject in the + instance. + type: string + tags: + description: |- + Tags which will be added to the machine and all dependent resources + which support them. These are in addition to Tags defined on the + cluster. + Requires Nova api 2.52 minimum! + items: + type: string + type: array + x-kubernetes-list-type: set + trunk: + description: Trunk is a flag to indicate if the server instance is + created on a trunk port or not. + type: boolean + userDataRef: + description: |- + UserDataRef is a reference to a secret containing the user data to + be injected into the server instance. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - flavor + - identityRef + - image + - ports + - sshKeyName + type: object + status: + description: OpenStackServerStatus defines the observed state of OpenStackServer. + properties: + addresses: + description: Addresses is the list of addresses of the server instance. + items: + description: NodeAddress contains information for the node's address. + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP + or InternalIP. + type: string + required: + - address + - type + type: object + type: array + conditions: + description: Conditions defines current service state of the OpenStackServer. 
+ items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + A human readable message indicating details about the transition. + This field may be empty. + type: string + reason: + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. + type: string + severity: + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + instanceID: + description: InstanceID is the ID of the server instance. + type: string + instanceState: + description: InstanceState is the state of the server instance. + type: string + ready: + default: false + description: Ready is true when the OpenStack server is ready. + type: boolean + resolved: + description: |- + Resolved contains parts of the machine spec with all external + references fully resolved. 
+ properties: + imageID: + description: ImageID is the ID of the image to use for the server + and is calculated based on ImageFilter. + type: string + ports: + description: Ports is the fully resolved list of ports to create + for the server. + items: + description: ResolvedPortSpec is a PortOpts with all contained + references fully resolved. + properties: + adminStateUp: + description: AdminStateUp specifies whether the port should + be created in the up (true) or down (false) state. The + default is up. + type: boolean + allowedAddressPairs: + description: |- + AllowedAddressPairs is a list of address pairs which Neutron will + allow the port to send traffic from in addition to the port's + addresses. If not specified, the MAC Address will be the MAC Address + of the port. Depending on the configuration of Neutron, it may be + supported to specify a CIDR instead of a specific IP address. + items: + properties: + ipAddress: + description: |- + IPAddress is the IP address of the allowed address pair. Depending on + the configuration of Neutron, it may be supported to specify a CIDR + instead of a specific IP address. + type: string + macAddress: + description: |- + MACAddress is the MAC address of the allowed address pair. If not + specified, the MAC address will be the MAC address of the port. + type: string + required: + - ipAddress + type: object + type: array + description: + description: Description is a human-readable description + for the port. + type: string + disablePortSecurity: + description: |- + DisablePortSecurity enables or disables the port security when set. + When not set, it takes the value of the corresponding field at the network level. + type: boolean + fixedIPs: + description: FixedIPs is a list of pairs of subnet and/or + IP address to assign to the port. If specified, these + must be subnets of the port's network. + items: + description: ResolvedFixedIP is a FixedIP with the Subnet + resolved to an ID. 
+ properties: + ipAddress: + description: |- + IPAddress is a specific IP address to assign to the port. If SubnetID + is also specified, IPAddress must be a valid IP address in the + subnet. If Subnet is not specified, IPAddress must be a valid IP + address in any subnet of the port's network. + type: string + subnet: + description: SubnetID is the id of a subnet to create + the fixed IP of a port in. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + hostID: + description: HostID specifies the ID of the host where the + port resides. + type: string + macAddress: + description: MACAddress specifies the MAC address of the + port. If not specified, the MAC address will be generated. + type: string + name: + description: Name is the name of the port. + type: string + networkID: + description: NetworkID is the ID of the network the port + will be created in. + type: string + profile: + description: |- + Profile is a set of key-value pairs that are used for binding + details. We intentionally don't expose this as a map[string]string + because we only want to enable the users to set the values of the + keys that are known to work in OpenStack Networking API. See + https://docs.openstack.org/api-ref/network/v2/index.html?expanded=create-port-detail#create-port + To set profiles, your tenant needs permissions rule:create_port, and + rule:create_port:binding:profile + properties: + ovsHWOffload: + description: |- + OVSHWOffload enables or disables the OVS hardware offload feature. + This flag is not required on OpenStack clouds since Yoga as Nova will set it automatically when the port is attached. + See: https://bugs.launchpad.net/nova/+bug/2020813 + type: boolean + trustedVF: + description: TrustedVF enables or disables the “trusted + mode” for the VF. + type: boolean + type: object + propagateUplinkStatus: + description: PropageteUplinkStatus enables or disables the + propagate uplink status on the port. 
+ type: boolean + securityGroups: + description: SecurityGroups is a list of security group + IDs to assign to the port. + items: + type: string + type: array + x-kubernetes-list-type: atomic + tags: + description: Tags applied to the port (and corresponding + trunk, if a trunk is configured.) + items: + type: string + type: array + x-kubernetes-list-type: set + trunk: + description: Trunk specifies whether trunking is enabled + at the port level. + type: boolean + valueSpecs: + description: |- + Value specs are extra parameters to include in the API request with OpenStack. + This is an extension point for the API, so what they do and if they are supported, + depends on the specific OpenStack implementation. + items: + description: ValueSpec represents a single value_spec + key-value pair. + properties: + key: + description: Key is the key in the key-value pair. + type: string + name: + description: |- + Name is the name of the key-value pair. + This is just for identifying the pair and will not be sent to the OpenStack API. + type: string + value: + description: Value is the value in the key-value pair. + type: string + required: + - key + - name + - value + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + vnicType: + description: |- + VNICType specifies the type of vNIC which this port should be + attached to. This is used to determine which mechanism driver(s) to + be used to bind the port. The valid values are normal, macvtap, + direct, baremetal, direct-physical, virtio-forwarder, smart-nic and + remote-managed, although these values will not be validated in this + API to ensure compatibility with future neutron changes or custom + implementations. What type of vNIC is actually available depends on + deployments. If not specified, the Neutron default value is used. 
+ type: string + required: + - description + - name + - networkID + type: object + type: array + serverGroupID: + description: ServerGroupID is the ID of the server group the server + should be added to and is calculated based on ServerGroupFilter. + type: string + type: object + resources: + description: Resources contains references to OpenStack resources + created for the machine. + properties: + ports: + description: Ports is the status of the ports created for the + server. + items: + properties: + id: + description: ID is the unique identifier of the port. + type: string + required: + - id + type: object + type: array + type: object + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index a4c9e050cf..23697e7f31 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml - bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml - bases/infrastructure.cluster.x-k8s.io_openstackfloatingippools.yaml +- bases/infrastructure.cluster.x-k8s.io_openstackservers.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 4790f87801..7a31f4c2e0 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -113,6 +113,26 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackservers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackservers/status + verbs: + - get + - patch + - update - apiGroups: - ipam.cluster.x-k8s.io resources: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 032b3756be..d47c93d80a 100644 --- a/config/webhook/manifests.yaml +++ 
b/config/webhook/manifests.yaml @@ -88,3 +88,24 @@ webhooks: resources: - openstackmachinetemplates sideEffects: None +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-openstackserver + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.openstackserver.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - openstackservers + sideEffects: None diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index 22fe0b7ec0..9106d27721 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -26,7 +26,9 @@ import ( "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" @@ -45,6 +47,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer" @@ -56,7 +59,7 @@ import ( ) const ( - BastionInstanceHashAnnotation = "infrastructure.cluster.x-k8s.io/bastion-hash" + waitForBastionToReconcile = 15 * time.Second ) // OpenStackClusterReconciler reconciles a OpenStackCluster object. 
@@ -128,7 +131,7 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req } // Handle non-deleted clusters - return reconcileNormal(scope, cluster, openStackCluster) + return r.reconcileNormal(ctx, scope, cluster, openStackCluster) } func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { @@ -153,14 +156,20 @@ func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope // A bastion may have been created if cluster initialisation previously reached populating the network status // We attempt to delete it even if no status was written, just in case if openStackCluster.Status.Network != nil { - // Attempt to resolve bastion resources before delete. We don't need to worry about starting if the resources have changed on update. - if _, err := resolveBastionResources(scope, clusterResourceName, openStackCluster); err != nil { + if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil { return reconcile.Result{}, err } + } - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return reconcile.Result{}, err - } + // If a bastion server was found, we need to reconcile now until it's actually deleted. + // We don't want to remove the cluster finalizer until the associated OpenStackServer resource is deleted. 
+ bastionServer, err := r.getBastionServer(ctx, openStackCluster, cluster) + if client.IgnoreNotFound(err) != nil { + return reconcile.Result{}, err + } + if bastionServer != nil { + scope.Logger().Info("Waiting for the bastion OpenStackServer object to be deleted", "openStackServer", bastionServer.Name) + return ctrl.Result{Requeue: true}, nil } networkingService, err := networking.NewService(scope) @@ -218,46 +227,7 @@ func contains(arr []string, target string) bool { return false } -func resolveBastionResources(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster) (bool, error) { - // Resolve and store resources for the bastion - if openStackCluster.Spec.Bastion.IsEnabled() { - if openStackCluster.Status.Bastion == nil { - openStackCluster.Status.Bastion = &infrav1.BastionStatus{} - } - if openStackCluster.Spec.Bastion.Spec == nil { - return false, fmt.Errorf("bastion spec is nil when bastion is enabled, this shouldn't happen") - } - resolved := openStackCluster.Status.Bastion.Resolved - if resolved == nil { - resolved = &infrav1.ResolvedMachineSpec{} - openStackCluster.Status.Bastion.Resolved = resolved - } - changed, err := compute.ResolveMachineSpec(scope, - openStackCluster.Spec.Bastion.Spec, resolved, - clusterResourceName, bastionName(clusterResourceName), - openStackCluster, getBastionSecurityGroupID(openStackCluster)) - if err != nil { - return false, err - } - if changed { - // If the resolved machine spec changed we need to restart the reconcile to avoid inconsistencies between reconciles. 
- return true, nil - } - resources := openStackCluster.Status.Bastion.Resources - if resources == nil { - resources = &infrav1.MachineResources{} - openStackCluster.Status.Bastion.Resources = resources - } - - err = compute.AdoptMachineResources(scope, resolved, resources) - if err != nil { - return false, err - } - } - return false, nil -} - -func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { +func (r *OpenStackClusterReconciler) deleteBastion(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error { scope.Logger().Info("Deleting Bastion") computeService, err := compute.NewService(scope) @@ -269,6 +239,11 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac return err } + bastionServer, err := r.getBastionServer(ctx, openStackCluster, cluster) + if client.IgnoreNotFound(err) != nil { + return err + } + if openStackCluster.Status.Bastion != nil && openStackCluster.Status.Bastion.FloatingIP != "" { if err = networkingService.DeleteFloatingIP(openStackCluster, openStackCluster.Status.Bastion.FloatingIP); err != nil { handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete floating IP: %w", err), false) @@ -279,28 +254,14 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac bastionStatus := openStackCluster.Status.Bastion var instanceStatus *compute.InstanceStatus - if bastionStatus != nil && bastionStatus.ID != "" { - instanceStatus, err = computeService.GetInstanceStatus(openStackCluster.Status.Bastion.ID) - if err != nil { - return err - } - } else { - instanceStatus, err = computeService.GetInstanceStatusByName(openStackCluster, bastionName(cluster.Name)) + if bastionStatus != nil && bastionServer != nil && bastionServer.Status.InstanceID != nil { + instanceStatus, err = computeService.GetInstanceStatus(*bastionServer.Status.InstanceID) if err != nil { 
return err } } - // If no instance was created we currently need to check for orphaned - // volumes. - if instanceStatus == nil { - bastion := openStackCluster.Spec.Bastion - if bastion != nil && bastion.Spec != nil { - if err := computeService.DeleteVolumes(bastionName(cluster.Name), bastion.Spec.RootVolume, bastion.Spec.AdditionalBlockDevices); err != nil { - return fmt.Errorf("delete volumes: %w", err) - } - } - } else { + if instanceStatus != nil { instanceNS, err := instanceStatus.NetworkStatus() if err != nil { return err @@ -316,36 +277,20 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac } } } - - if err = computeService.DeleteInstance(openStackCluster, instanceStatus); err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete bastion: %w", err), false) - return fmt.Errorf("failed to delete bastion: %w", err) - } } - if bastionStatus != nil && bastionStatus.Resources != nil { - trunkSupported, err := networkingService.IsTrunkExtSupported() - if err != nil { - return err - } - for _, port := range bastionStatus.Resources.Ports { - if err := networkingService.DeleteInstanceTrunkAndPort(openStackCluster, port, trunkSupported); err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete port: %w", err), false) - return fmt.Errorf("failed to delete port: %w", err) - } - } - bastionStatus.Resources.Ports = nil + if err := r.reconcileDeleteBastionServer(ctx, scope, openStackCluster, cluster); err != nil { + handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete bastion: %w", err), false) + return fmt.Errorf("failed to delete bastion: %w", err) } - scope.Logger().Info("Deleted Bastion") - openStackCluster.Status.Bastion = nil - delete(openStackCluster.ObjectMeta.Annotations, BastionInstanceHashAnnotation) + scope.Logger().Info("Deleted Bastion") return nil } -func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster 
*infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam +func (r *OpenStackClusterReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam scope.Logger().Info("Reconciling Cluster") // If the OpenStackCluster doesn't have our finalizer, add it. @@ -364,7 +309,9 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt return reconcile.Result{}, err } - result, err := reconcileBastion(scope, cluster, openStackCluster) + // TODO(emilien) we should do that separately but the reconcileBastion + // should happen after the cluster Ready is true + result, err := r.reconcileBastion(ctx, scope, cluster, openStackCluster) if err != nil { return reconcile.Result{}, err } @@ -399,32 +346,10 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt return reconcile.Result{}, nil } -func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) { +func (r *OpenStackClusterReconciler) reconcileBastion(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) { scope.Logger().V(4).Info("Reconciling Bastion") clusterResourceName := names.ClusterResourceName(cluster) - changed, err := resolveBastionResources(scope, clusterResourceName, openStackCluster) - if err != nil { - return nil, err - } - if changed { - return &reconcile.Result{}, nil - } - - // No Bastion defined - if !openStackCluster.Spec.Bastion.IsEnabled() { - // Delete any existing bastion - if openStackCluster.Status.Bastion != nil { - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return nil, err - } - // Reconcile again before continuing - return &reconcile.Result{}, nil - } - - // Otherwise nothing to do - return nil, nil - } computeService, err 
:= compute.NewService(scope) if err != nil { @@ -436,71 +361,28 @@ func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openS return nil, err } - instanceSpec, err := bastionToInstanceSpec(openStackCluster, cluster) - if err != nil { - return nil, err - } - - bastionHash, err := compute.HashInstanceSpec(instanceSpec) - if err != nil { - return nil, fmt.Errorf("failed computing bastion hash from instance spec: %w", err) + bastionServer, waitingForServer, err := r.reconcileBastionServer(ctx, scope, openStackCluster, cluster) + if err != nil || waitingForServer { + return &reconcile.Result{RequeueAfter: waitForBastionToReconcile}, err } - if bastionHashHasChanged(bastionHash, openStackCluster.ObjectMeta.Annotations) { - scope.Logger().Info("Bastion instance spec has changed, deleting existing bastion") - - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return nil, err - } - - // Add the new annotation and reconcile again before continuing - annotations.AddAnnotations(openStackCluster, map[string]string{BastionInstanceHashAnnotation: bastionHash}) - return &reconcile.Result{}, nil - } - - err = getOrCreateBastionPorts(openStackCluster, networkingService) - if err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create ports for bastion: %w", err), false) - return nil, fmt.Errorf("failed to get or create ports for bastion: %w", err) + if bastionServer == nil { + return nil, nil } - bastionPortIDs := GetPortIDs(openStackCluster.Status.Bastion.Resources.Ports) var instanceStatus *compute.InstanceStatus - if openStackCluster.Status.Bastion != nil && openStackCluster.Status.Bastion.ID != "" { - if instanceStatus, err = computeService.GetInstanceStatus(openStackCluster.Status.Bastion.ID); err != nil { - return nil, err - } - } - if instanceStatus == nil { - // Check if there is an existing instance with bastion name, in case where bastion ID would not have been properly stored in cluster status - 
if instanceStatus, err = computeService.GetInstanceStatusByName(openStackCluster, instanceSpec.Name); err != nil { + if bastionServer != nil && bastionServer.Status.InstanceID != nil { + if instanceStatus, err = computeService.GetInstanceStatus(*bastionServer.Status.InstanceID); err != nil { return nil, err } } if instanceStatus == nil { - instanceStatus, err = computeService.CreateInstance(openStackCluster, instanceSpec, bastionPortIDs) - if err != nil { - return nil, fmt.Errorf("failed to create bastion: %w", err) - } + // At this point we return an error if we don't have an instance status + return nil, fmt.Errorf("bastion instance status is nil") } // Save hash & status as soon as we know we have an instance instanceStatus.UpdateBastionStatus(openStackCluster) - // Make sure that bastion instance has a valid state - switch instanceStatus.State() { - case infrav1.InstanceStateError: - return nil, fmt.Errorf("failed to reconcile bastion, instance state is ERROR") - case infrav1.InstanceStateBuild, infrav1.InstanceStateUndefined: - scope.Logger().Info("Waiting for bastion instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) - return &reconcile.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil - case infrav1.InstanceStateDeleted: - // Not clear why this would happen, so try to clean everything up before reconciling again - if err := deleteBastion(scope, cluster, openStackCluster); err != nil { - return nil, err - } - return &reconcile.Result{}, nil - } - port, err := computeService.GetManagementPort(openStackCluster, instanceStatus) if err != nil { err = fmt.Errorf("getting management port for bastion: %w", err) @@ -549,7 +431,123 @@ func bastionAddFloatingIP(openStackCluster *infrav1.OpenStackCluster, clusterRes return nil, nil } -func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*compute.InstanceSpec, error) { +// reconcileDeleteBastionServer reconciles the 
OpenStackServer object for the OpenStackCluster bastion. +// It returns nil if the OpenStackServer object is not found; otherwise it deletes the object and returns any error. +func (r *OpenStackClusterReconciler) reconcileDeleteBastionServer(ctx context.Context, scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) error { + scope.Logger().Info("Reconciling Bastion delete server") + server := &infrav1alpha1.OpenStackServer{} + err := r.Client.Get(ctx, client.ObjectKey{Namespace: openStackCluster.Namespace, Name: bastionName(cluster.Name)}, server) + if client.IgnoreNotFound(err) != nil { + return err + } + if apierrors.IsNotFound(err) { + return nil + } + + return r.Client.Delete(ctx, server) +} + +// reconcileBastionServer reconciles the OpenStackServer object for the OpenStackCluster bastion. +// It returns the OpenStackServer object, a boolean which is true when the caller should wait and requeue +// and an error if any. +func (r *OpenStackClusterReconciler) reconcileBastionServer(ctx context.Context, scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, bool, error) { + server, err := r.getBastionServer(ctx, openStackCluster, cluster) + if client.IgnoreNotFound(err) != nil { + scope.Logger().Error(err, "Failed to get the bastion OpenStackServer object") + return nil, true, err + } + bastionNotFound := apierrors.IsNotFound(err) + + // If the bastion is not enabled, we don't need to create it and continue with the reconciliation. + if bastionNotFound && !openStackCluster.Spec.Bastion.IsEnabled() { + return nil, false, nil + } + + // If the bastion is found but is not enabled, we need to delete it and reconcile. 
+ if !bastionNotFound && !openStackCluster.Spec.Bastion.IsEnabled() { + scope.Logger().Info("Bastion is not enabled, deleting the OpenStackServer object") + if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil { + return nil, true, err + } + return nil, true, nil + } + + // If the bastion is found but the spec has changed, we need to delete it and reconcile. + bastionServerSpec := bastionToOpenStackServerSpec(openStackCluster) + if !bastionNotFound && server != nil && !apiequality.Semantic.DeepEqual(bastionServerSpec, &server.Spec) { + scope.Logger().Info("Bastion spec has changed, re-creating the OpenStackServer object") + if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil { + return nil, true, err + } + return nil, true, nil + } + + // If the bastion is not found, we need to create it. + if bastionNotFound { + scope.Logger().Info("Creating the bastion OpenStackServer object") + server, err = r.createBastionServer(ctx, openStackCluster, cluster) + if err != nil { + return nil, true, err + } + return server, true, nil + } + + // If the bastion server is not ready, we need to wait for it to be ready and reconcile. + if !server.Status.Ready { + scope.Logger().Info("Waiting for the bastion OpenStackServer to be ready") + return server, true, nil + } + + return server, false, nil +} + +// getBastionServer returns the OpenStackServer object for the bastion server. +// It returns the OpenStackServer object and an error if any. 
+func (r *OpenStackClusterReconciler) getBastionServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, error) { + bastionServer := &infrav1alpha1.OpenStackServer{} + bastionServerName := client.ObjectKey{ + Namespace: openStackCluster.Namespace, + Name: bastionName(cluster.Name), + } + err := r.Client.Get(ctx, bastionServerName, bastionServer) + if err != nil { + return nil, err + } + return bastionServer, nil +} + +// createBastionServer creates the OpenStackServer object for the bastion server. +// It returns the OpenStackServer object and an error if any. +func (r *OpenStackClusterReconciler) createBastionServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, error) { + bastionServerSpec := bastionToOpenStackServerSpec(openStackCluster) + bastionServer := &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + clusterv1.ClusterNameLabel: openStackCluster.Labels[clusterv1.ClusterNameLabel], + }, + Name: bastionName(cluster.Name), + Namespace: openStackCluster.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: openStackCluster.APIVersion, + Kind: openStackCluster.Kind, + Name: openStackCluster.Name, + UID: openStackCluster.UID, + }, + }, + }, + Spec: *bastionServerSpec, + } + + if err := r.Client.Create(ctx, bastionServer); err != nil { + return nil, fmt.Errorf("failed to create bastion server: %w", err) + } + return bastionServer, nil +} + +// bastionToOpenStackServerSpec converts the OpenStackMachineSpec for the bastion to an OpenStackServerSpec. +// It returns the OpenStackServerSpec. 
+func bastionToOpenStackServerSpec(openStackCluster *infrav1.OpenStackCluster) *infrav1alpha1.OpenStackServerSpec { bastion := openStackCluster.Spec.Bastion if bastion == nil { bastion = &infrav1.Bastion{} @@ -559,25 +557,14 @@ func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, cluster * // v1beta1 API validations prevent this from happening in normal circumstances. bastion.Spec = &infrav1.OpenStackMachineSpec{} } - resolved := openStackCluster.Status.Bastion.Resolved - if resolved == nil { - return nil, errors.New("bastion resolved is nil") - } - machineSpec := bastion.Spec - instanceSpec := &compute.InstanceSpec{ - Name: bastionName(cluster.Name), - Flavor: machineSpec.Flavor, - SSHKeyName: machineSpec.SSHKeyName, - ImageID: resolved.ImageID, - RootVolume: machineSpec.RootVolume, - ServerGroupID: resolved.ServerGroupID, - Tags: compute.InstanceTags(machineSpec, openStackCluster), - } + az := "" if bastion.AvailabilityZone != nil { - instanceSpec.FailureDomain = *bastion.AvailabilityZone + az = *bastion.AvailabilityZone } - return instanceSpec, nil + openStackServerSpec := openStackMachineSpecToOpenStackServerSpec(bastion.Spec, openStackCluster.Spec.IdentityRef, compute.InstanceTags(bastion.Spec, openStackCluster), az, nil, getBastionSecurityGroupID(openStackCluster), openStackCluster.Status.Network.ID) + + return openStackServerSpec } func bastionName(clusterResourceName string) string { @@ -597,34 +584,6 @@ func getBastionSecurityGroupID(openStackCluster *infrav1.OpenStackCluster) *stri return nil } -func getOrCreateBastionPorts(openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service) error { - desiredPorts := openStackCluster.Status.Bastion.Resolved.Ports - resources := openStackCluster.Status.Bastion.Resources - if resources == nil { - return errors.New("bastion resources are nil") - } - - if len(desiredPorts) == len(resources.Ports) { - return nil - } - - err := networkingService.CreatePorts(openStackCluster, 
desiredPorts, resources) - if err != nil { - return fmt.Errorf("failed to create ports for bastion %s: %w", bastionName(openStackCluster.Name), err) - } - - return nil -} - -// bastionHashHasChanged returns a boolean whether if the latest bastion hash, built from the instance spec, has changed or not. -func bastionHashHasChanged(computeHash string, clusterAnnotations map[string]string) bool { - latestHash, ok := clusterAnnotations[BastionInstanceHashAnnotation] - if !ok { - return false - } - return latestHash != computeHash -} - func resolveLoadBalancerNetwork(openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service) error { lbSpec := openStackCluster.Spec.APIServerLoadBalancer if lbSpec.IsEnabled() { diff --git a/controllers/openstackcluster_controller_test.go b/controllers/openstackcluster_controller_test.go index 807f489e24..141b7977c6 100644 --- a/controllers/openstackcluster_controller_test.go +++ b/controllers/openstackcluster_controller_test.go @@ -22,12 +22,7 @@ import ( "reflect" "testing" - "github.com/google/go-cmp/cmp" - "github.com/gophercloud/gophercloud/v2" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets" . "github.com/onsi/ginkgo/v2" //nolint:revive . 
"github.com/onsi/gomega" //nolint:revive @@ -207,9 +202,6 @@ var _ = Describe("OpenStackCluster controller", func() { testCluster.Status = infrav1.OpenStackClusterStatus{ Bastion: &infrav1.BastionStatus{ ID: "bastion-uuid", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - }, }, } err = k8sClient.Status().Update(ctx, testCluster) @@ -219,257 +211,10 @@ var _ = Describe("OpenStackCluster controller", func() { Expect(err).To(BeNil()) scope := scope.NewWithLogger(clientScope, log) - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.GetServer("bastion-uuid").Return(nil, gophercloud.ErrResourceNotFound{}) - - err = deleteBastion(scope, capiCluster, testCluster) + err = reconciler.deleteBastion(ctx, scope, capiCluster, testCluster) Expect(err).To(BeNil()) Expect(testCluster.Status.Bastion).To(BeNil()) }) - It("should adopt an existing bastion even if its uuid is not stored in status", func() { - testCluster.SetName("adopt-existing-bastion") - testCluster.Spec = infrav1.OpenStackClusterSpec{ - Bastion: &infrav1.Bastion{ - Enabled: ptr.To(true), - Spec: &bastionSpec, - }, - } - err := k8sClient.Create(ctx, testCluster) - Expect(err).To(BeNil()) - err = k8sClient.Create(ctx, capiCluster) - Expect(err).To(BeNil()) - testCluster.Status = infrav1.OpenStackClusterStatus{ - Bastion: &infrav1.BastionStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - }, - Network: &infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - Name: "network-name", - ID: "network-id", - }, - }, - } - err = k8sClient.Status().Update(ctx, testCluster) - Expect(err).To(BeNil()) - - log := GinkgoLogr - clientScope, err := mockScopeFactory.NewClientScopeFromObject(ctx, k8sClient, nil, log, testCluster) - 
Expect(err).To(BeNil()) - scope := scope.NewWithLogger(clientScope, log) - - server := servers.Server{ - ID: "adopted-bastion-uuid", - Status: "ACTIVE", - } - - networkClientRecorder := mockScopeFactory.NetworkClient.EXPECT() - networkClientRecorder.ListPort(gomock.Any()).Return([]ports.Port{{ID: "portID1"}}, nil) - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.ListServers(servers.ListOpts{ - Name: "^capi-cluster-bastion$", - }).Return([]servers.Server{server}, nil) - - networkClientRecorder.ListFloatingIP(floatingips.ListOpts{PortID: "portID1"}).Return(make([]floatingips.FloatingIP, 1), nil) - - res, err := reconcileBastion(scope, capiCluster, testCluster) - expectedStatus := &infrav1.BastionStatus{ - ID: "adopted-bastion-uuid", - State: "ACTIVE", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - } - Expect(testCluster.Status.Bastion).To(Equal(expectedStatus), cmp.Diff(testCluster.Status.Bastion, expectedStatus)) - Expect(err).To(BeNil()) - Expect(res).To(BeNil()) - }) - It("should adopt an existing bastion Floating IP if even if its uuid is not stored in status", func() { - testCluster.SetName("requeue-bastion") - testCluster.Spec = infrav1.OpenStackClusterSpec{ - Bastion: &infrav1.Bastion{ - Enabled: ptr.To(true), - Spec: &bastionSpec, - }, - } - err := k8sClient.Create(ctx, testCluster) - Expect(err).To(BeNil()) - err = k8sClient.Create(ctx, capiCluster) - Expect(err).To(BeNil()) - testCluster.Status = infrav1.OpenStackClusterStatus{ - Network: &infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - Name: "network-name", - ID: "network-id", - }, - }, - Bastion: &infrav1.BastionStatus{ - ID: "adopted-fip-bastion-uuid", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: 
[]infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - }, - } - err = k8sClient.Status().Update(ctx, testCluster) - Expect(err).To(BeNil()) - - log := GinkgoLogr - clientScope, err := mockScopeFactory.NewClientScopeFromObject(ctx, k8sClient, nil, log, testCluster) - Expect(err).To(BeNil()) - scope := scope.NewWithLogger(clientScope, log) - - server := servers.Server{ - ID: "adopted-fip-bastion-uuid", - Status: "ACTIVE", - } - - networkClientRecorder := mockScopeFactory.NetworkClient.EXPECT() - networkClientRecorder.ListPort(gomock.Any()).Return([]ports.Port{{ID: "portID1"}}, nil) - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.GetServer("adopted-fip-bastion-uuid").Return(&server, nil) - - networkClientRecorder.ListFloatingIP(floatingips.ListOpts{PortID: "portID1"}).Return([]floatingips.FloatingIP{{FloatingIP: "1.2.3.4"}}, nil) - - res, err := reconcileBastion(scope, capiCluster, testCluster) - Expect(testCluster.Status.Bastion).To(Equal(&infrav1.BastionStatus{ - ID: "adopted-fip-bastion-uuid", - FloatingIP: "1.2.3.4", - State: "ACTIVE", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - })) - Expect(err).To(BeNil()) - Expect(res).To(BeNil()) - }) - It("should requeue until bastion becomes active", func() { - testCluster.SetName("requeue-bastion") - testCluster.Spec = infrav1.OpenStackClusterSpec{ - Bastion: &infrav1.Bastion{ - Enabled: ptr.To(true), - Spec: &bastionSpec, - }, - } - err := k8sClient.Create(ctx, testCluster) - Expect(err).To(BeNil()) - err = k8sClient.Create(ctx, capiCluster) - Expect(err).To(BeNil()) - testCluster.Status = infrav1.OpenStackClusterStatus{ - Network: 
&infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - ID: "network-id", - Name: "network-name", - }, - }, - Bastion: &infrav1.BastionStatus{ - ID: "requeue-bastion-uuid", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - }, - } - err = k8sClient.Status().Update(ctx, testCluster) - Expect(err).To(BeNil()) - - log := GinkgoLogr - clientScope, err := mockScopeFactory.NewClientScopeFromObject(ctx, k8sClient, nil, log, testCluster) - Expect(err).To(BeNil()) - scope := scope.NewWithLogger(clientScope, log) - - server := servers.Server{ - ID: "requeue-bastion-uuid", - Status: "BUILD", - } - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.GetServer("requeue-bastion-uuid").Return(&server, nil) - - res, err := reconcileBastion(scope, capiCluster, testCluster) - Expect(testCluster.Status.Bastion).To(Equal(&infrav1.BastionStatus{ - ID: "requeue-bastion-uuid", - State: "BUILD", - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: "imageID", - Ports: []infrav1.ResolvedPortSpec{ - { - NetworkID: "network-id", - }, - }, - }, - Resources: &infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: "portID1", - }, - }, - }, - })) - Expect(err).To(BeNil()) - Expect(res).To(Equal(&reconcile.Result{RequeueAfter: waitForBuildingInstanceToReconcile})) - }) It("should delete an existing bastion even if its uuid is not stored in status", func() { testCluster.SetName("delete-existing-bastion") testCluster.Spec = infrav1.OpenStackClusterSpec{} @@ -478,11 +223,6 @@ var _ = Describe("OpenStackCluster controller", func() { err = k8sClient.Create(ctx, capiCluster) Expect(err).To(BeNil()) testCluster.Status = infrav1.OpenStackClusterStatus{ - Bastion: &infrav1.BastionStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - 
ImageID: "imageID", - }, - }, Network: &infrav1.NetworkStatusWithSubnets{ NetworkStatus: infrav1.NetworkStatus{ ID: "network-id", @@ -497,16 +237,7 @@ var _ = Describe("OpenStackCluster controller", func() { Expect(err).To(BeNil()) scope := scope.NewWithLogger(clientScope, log) - server := servers.Server{ID: "delete-bastion-uuid"} - - computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT() - computeClientRecorder.ListServers(servers.ListOpts{ - Name: "^capi-cluster-bastion$", - }).Return([]servers.Server{server}, nil) - computeClientRecorder.DeleteServer("delete-bastion-uuid").Return(nil) - computeClientRecorder.GetServer("delete-bastion-uuid").Return(nil, gophercloud.ErrResourceNotFound{}) - - err = deleteBastion(scope, capiCluster, testCluster) + err = reconciler.deleteBastion(ctx, scope, capiCluster, testCluster) Expect(err).To(BeNil()) }) diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index b1c649f596..f1e9b362b1 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "encoding/base64" "errors" "fmt" "time" @@ -27,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" @@ -47,12 +45,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" 
"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/names" ) // OpenStackMachineReconciler reconciles a OpenStackMachine object. @@ -155,37 +153,13 @@ func (r *OpenStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req // Handle deleted machines if !openStackMachine.DeletionTimestamp.IsZero() { - return r.reconcileDelete(scope, clusterResourceName, infraCluster, machine, openStackMachine) + return r.reconcileDelete(ctx, scope, clusterResourceName, infraCluster, machine, openStackMachine) } // Handle non-deleted clusters return r.reconcileNormal(ctx, scope, clusterResourceName, infraCluster, machine, openStackMachine) } -func resolveMachineResources(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine) (bool, error) { - resolved := openStackMachine.Status.Resolved - if resolved == nil { - resolved = &infrav1.ResolvedMachineSpec{} - openStackMachine.Status.Resolved = resolved - } - // Resolve and store resources - return compute.ResolveMachineSpec(scope, - &openStackMachine.Spec, resolved, - clusterResourceName, openStackMachine.Name, - openStackCluster, getManagedSecurityGroup(openStackCluster, machine)) -} - -func adoptMachineResources(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine) error { - resources := openStackMachine.Status.Resources - if resources == nil { - resources = &infrav1.MachineResources{} - openStackMachine.Status.Resources = resources - } - - // Adopt any existing resources - return compute.AdoptMachineResources(scope, openStackMachine.Status.Resolved, resources) -} - func patchMachine(ctx context.Context, patchHelper *patch.Helper, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine, options ...patch.Option) error { // Always update the readyCondition by summarizing the state of other conditions. 
applicableConditions := []clusterv1.ConditionType{ @@ -235,10 +209,19 @@ func (r *OpenStackMachineReconciler) SetupWithManager(ctx context.Context, mgr c &ipamv1.IPAddressClaim{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &infrav1.OpenStackMachine{}), ). + // TODO(emilien) to optimize because it's not efficient to watch all OpenStackServer events. + // We are only interested in certain state transitions of the OpenStackServer: + // - when the server is deleted + // - when the server becomes ready + // For that we probably want to write Predicate functions for the OpenStackServer. + Watches( + &infrav1alpha1.OpenStackServer{}, + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &infrav1.OpenStackMachine{}), + ). Complete(r) } -func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam +func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam scope.Logger().Info("Reconciling Machine delete") computeService, err := compute.NewService(scope) @@ -246,11 +229,6 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl return ctrl.Result{}, err } - networkingService, err := networking.NewService(scope) - if err != nil { - return ctrl.Result{}, err - } - // Nothing to do if the cluster is not ready because no machine resources were created. 
if !openStackCluster.Status.Ready || openStackCluster.Status.Network == nil { // The finalizer should not have been added yet in this case, @@ -259,38 +237,17 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl return ctrl.Result{}, nil } - // For machines created after v0.10, or any machine which has been - // reconciled at least once by v0.10 or later, status.Resolved always - // exists before any resources are created. We can therefore assume - // that if it does not exist, no resources were created. - // - // There is an upgrade edge case where a machine may have been marked - // deleted before upgrade but we are completing it after upgrade. For - // this use case only we make a best effort to resolve resources before - // continuing, but if we get an error we log it and continue anyway. - // This has the potential to leak resources, but only in this specific - // edge case. The alternative is to continue retrying until it succeeds, - // but that risks never deleting a machine which cannot be resolved due - // to a spec error. - // - // This code can and should be deleted in a future release when we are - // sure that all machines have been reconciled at least by a v0.10 or - // later controller. - if _, err := resolveMachineResources(scope, clusterResourceName, openStackCluster, openStackMachine, machine); err != nil { - // Return the error, but allow the resource to be removed anyway. - controllerutil.RemoveFinalizer(openStackMachine, infrav1.MachineFinalizer) + machineServer, err := r.getMachineServer(ctx, openStackMachine) + if client.IgnoreNotFound(err) != nil { return ctrl.Result{}, err } - // Check for any orphaned resources - // N.B. Unlike resolveMachineResources, we must always look for orphaned resources in the delete path. 
- if err := adoptMachineResources(scope, openStackMachine); err != nil { - return ctrl.Result{}, fmt.Errorf("adopting machine resources: %w", err) - } - - instanceStatus, err := getInstanceStatus(openStackMachine, computeService) - if err != nil { - return ctrl.Result{}, err + var instanceStatus *compute.InstanceStatus + if machineServer != nil && machineServer.Status.InstanceID != nil { + instanceStatus, err = computeService.GetInstanceStatus(*machineServer.Status.InstanceID) + if err != nil { + return ctrl.Result{}, err + } } if util.IsControlPlaneMachine(machine) { @@ -299,35 +256,16 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl } } - // If no instance was created we currently need to check for orphaned - // volumes. - if instanceStatus == nil { - if err := computeService.DeleteVolumes(openStackMachine.Name, openStackMachine.Spec.RootVolume, openStackMachine.Spec.AdditionalBlockDevices); err != nil { - return ctrl.Result{}, fmt.Errorf("delete volumes: %w", err) - } - } else { - if err := computeService.DeleteInstance(openStackMachine, instanceStatus); err != nil { + if machineServer != nil { + scope.Logger().Info("Deleting server", "name", machineServer.Name) + if err := r.Client.Delete(ctx, machineServer); err != nil { conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceDeleteFailedReason, clusterv1.ConditionSeverityError, "Deleting instance failed: %v", err) - return ctrl.Result{}, fmt.Errorf("delete instance: %w", err) - } - } - - trunkSupported, err := networkingService.IsTrunkExtSupported() - if err != nil { - return ctrl.Result{}, err - } - - if openStackMachine.Status.Resources != nil { - portsStatus := openStackMachine.Status.Resources.Ports - for _, port := range portsStatus { - if err := networkingService.DeleteInstanceTrunkAndPort(openStackMachine, port, trunkSupported); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to delete port %q: %w", port.ID, err) - } + return 
ctrl.Result{}, err } - } - - if err := r.reconcileDeleteFloatingAddressFromPool(scope, openStackMachine); err != nil { - return ctrl.Result{}, err + // If the server was found, we need to wait for it to be deleted before + // removing the OpenStackMachine finalizer. + scope.Logger().Info("Waiting for server to be deleted before removing finalizer") + return ctrl.Result{}, nil } controllerutil.RemoveFinalizer(openStackMachine, infrav1.MachineFinalizer) @@ -335,13 +273,6 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl return ctrl.Result{}, nil } -func getInstanceStatus(openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service) (*compute.InstanceStatus, error) { - if openStackMachine.Status.InstanceID != nil { - return computeService.GetInstanceStatus(*openStackMachine.Status.InstanceID) - } - return computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) -} - func removeAPIServerEndpoint(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceStatus *compute.InstanceStatus, clusterResourceName string) error { if openStackCluster.Spec.APIServerLoadBalancer.IsEnabled() { loadBalancerService, err := loadbalancer.NewService(scope) @@ -400,136 +331,6 @@ func GetPortIDs(ports []infrav1.PortStatus) []string { return portIDs } -// reconcileFloatingAddressFromPool reconciles the floating IP address from the pool. -// It returns the IPAddressClaim and a boolean indicating if the IPAddressClaim is ready. 
-func (r *OpenStackMachineReconciler) reconcileFloatingAddressFromPool(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) (*ipamv1.IPAddressClaim, bool, error) { - if openStackMachine.Spec.FloatingIPPoolRef == nil { - return nil, false, nil - } - var claim *ipamv1.IPAddressClaim - claim, err := r.getOrCreateIPAddressClaimForFloatingAddress(ctx, scope, openStackMachine, openStackCluster) - if err != nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityInfo, "Failed to reconcile floating IP claims: %v", err) - return nil, true, err - } - if claim.Status.AddressRef.Name == "" { - r.Recorder.Eventf(openStackMachine, corev1.EventTypeNormal, "WaitingForIPAddressClaim", "Waiting for IPAddressClaim %s/%s to be allocated", claim.Namespace, claim.Name) - return claim, true, nil - } - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) - return claim, false, nil -} - -// createIPAddressClaim creates IPAddressClaim for the FloatingAddressFromPool if it does not exist yet. 
-func (r *OpenStackMachineReconciler) getOrCreateIPAddressClaimForFloatingAddress(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) (*ipamv1.IPAddressClaim, error) { - var err error - - poolRef := openStackMachine.Spec.FloatingIPPoolRef - claimName := names.GetFloatingAddressClaimName(openStackMachine.Name) - claim := &ipamv1.IPAddressClaim{} - - err = r.Client.Get(ctx, client.ObjectKey{Namespace: openStackMachine.Namespace, Name: claimName}, claim) - if err == nil { - return claim, nil - } else if client.IgnoreNotFound(err) != nil { - return nil, err - } - - claim = &ipamv1.IPAddressClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: claimName, - Namespace: openStackMachine.Namespace, - Labels: map[string]string{ - clusterv1.ClusterNameLabel: openStackCluster.Labels[clusterv1.ClusterNameLabel], - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: openStackMachine.APIVersion, - Kind: openStackMachine.Kind, - Name: openStackMachine.Name, - UID: openStackMachine.UID, - }, - }, - Finalizers: []string{infrav1.IPClaimMachineFinalizer}, - }, - Spec: ipamv1.IPAddressClaimSpec{ - PoolRef: *poolRef, - }, - } - - if err := r.Client.Create(ctx, claim); err != nil { - return nil, err - } - - r.Recorder.Eventf(openStackMachine, corev1.EventTypeNormal, "CreatingIPAddressClaim", "Creating IPAddressClaim %s/%s", claim.Namespace, claim.Name) - scope.Logger().Info("Created IPAddressClaim", "name", claim.Name) - return claim, nil -} - -func (r *OpenStackMachineReconciler) associateIPAddressFromIPAddressClaim(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, instanceStatus *compute.InstanceStatus, instanceNS *compute.InstanceNetworkStatus, claim *ipamv1.IPAddressClaim) error { - address := &ipamv1.IPAddress{} - addressKey := client.ObjectKey{Namespace: openStackMachine.Namespace, Name: claim.Status.AddressRef.Name} - - if err := r.Client.Get(ctx, 
addressKey, address); err != nil { - return err - } - - instanceAddresses := instanceNS.Addresses() - for _, instanceAddress := range instanceAddresses { - if instanceAddress.Address == address.Spec.Address { - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) - return nil - } - } - - networkingService, err := networking.NewService(scope) - if err != nil { - return err - } - - fip, err := networkingService.GetFloatingIP(address.Spec.Address) - if err != nil { - return err - } - - if fip == nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "floating IP does not exist") - return fmt.Errorf("floating IP %q does not exist", address.Spec.Address) - } - - port, err := networkingService.GetPortForExternalNetwork(instanceStatus.ID(), fip.FloatingNetworkID) - if err != nil { - return fmt.Errorf("get port for floating IP %q: %w", fip.FloatingIP, err) - } - - if port == nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Can't find port for floating IP %q on external network %s", fip.FloatingIP, fip.FloatingNetworkID) - return fmt.Errorf("port for floating IP %q on network %s does not exist", fip.FloatingIP, fip.FloatingNetworkID) - } - - if err = networkingService.AssociateFloatingIP(openStackMachine, fip, port.ID); err != nil { - return err - } - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) - return nil -} - -func (r *OpenStackMachineReconciler) reconcileDeleteFloatingAddressFromPool(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine) error { - log := scope.Logger().WithValues("openStackMachine", openStackMachine.Name) - log.Info("Reconciling Machine delete floating address from pool") - if openStackMachine.Spec.FloatingIPPoolRef == nil { - 
return nil - } - claimName := names.GetFloatingAddressClaimName(openStackMachine.Name) - claim := &ipamv1.IPAddressClaim{} - if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: openStackMachine.Namespace, Name: claimName}, claim); err != nil { - return client.IgnoreNotFound(err) - } - - controllerutil.RemoveFinalizer(claim, infrav1.IPClaimMachineFinalizer) - return r.Client.Update(context.Background(), claim) -} - func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (_ ctrl.Result, reterr error) { var err error @@ -552,69 +353,29 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, nil } - changed, err := resolveMachineResources(scope, clusterResourceName, openStackCluster, openStackMachine, machine) - if err != nil { - return ctrl.Result{}, err - } - - // Also add the finalizer when writing resolved resources so we can start creating resources on the next reconcile. if controllerutil.AddFinalizer(openStackMachine, infrav1.MachineFinalizer) { - changed = true - } - - // We requeue if we either added the finalizer or resolved machine - // resources. This means that we never create any resources unless we - // have observed that the finalizer and resolved machine resources were - // successfully written in a previous transaction. This in turn means - // that in the delete path we can be sure that if there are no resolved - // resources then no resources were created. 
- if changed { - scope.Logger().V(6).Info("Machine resources updated, requeuing") return ctrl.Result{}, nil } - // Check for orphaned resources previously created but not written to the status - if err := adoptMachineResources(scope, openStackMachine); err != nil { - return ctrl.Result{}, fmt.Errorf("adopting machine resources: %w", err) - } - scope.Logger().Info("Reconciling Machine") - userData, err := r.getBootstrapData(ctx, machine, openStackMachine) - if err != nil { - return ctrl.Result{}, err - } - computeService, err := compute.NewService(scope) - if err != nil { - return ctrl.Result{}, err - } - - floatingAddressClaim, waitingForFloatingAddress, err := r.reconcileFloatingAddressFromPool(ctx, scope, openStackMachine, openStackCluster) - if err != nil || waitingForFloatingAddress { + machineServer, waitingForServer, err := r.reconcileMachineServer(ctx, scope, openStackMachine, openStackCluster, machine) + if err != nil || waitingForServer { return ctrl.Result{}, err } - networkingService, err := networking.NewService(scope) - if err != nil { - return ctrl.Result{}, err - } - - err = getOrCreateMachinePorts(openStackMachine, networkingService) + computeService, err := compute.NewService(scope) if err != nil { return ctrl.Result{}, err } - portIDs := GetPortIDs(openStackMachine.Status.Resources.Ports) - instanceStatus, err := r.getOrCreateInstance(scope.Logger(), openStackCluster, machine, openStackMachine, computeService, userData, portIDs) - if err != nil || instanceStatus == nil { - // Conditions set in getOrCreateInstance + // instanceStatus is required for the API server load balancer and floating IP reconciliation + // when Octavia is enabled. 
+ var instanceStatus *compute.InstanceStatus + if instanceStatus, err = computeService.GetInstanceStatus(*machineServer.Status.InstanceID); err != nil { return ctrl.Result{}, err } - state := instanceStatus.State() - openStackMachine.Status.InstanceID = ptr.To(instanceStatus.ID()) - openStackMachine.Status.InstanceState = &state - instanceNS, err := instanceStatus.NetworkStatus() if err != nil { return ctrl.Result{}, fmt.Errorf("get network status: %w", err) @@ -631,63 +392,204 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope }) openStackMachine.Status.Addresses = addresses - if floatingAddressClaim != nil { - if err := r.associateIPAddressFromIPAddressClaim(ctx, scope, openStackMachine, instanceStatus, instanceNS, floatingAddressClaim); err != nil { - conditions.MarkFalse(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Failed while associating ip from pool: %v", err) - return ctrl.Result{}, err - } - conditions.MarkTrue(openStackMachine, infrav1.FloatingAddressFromPoolReadyCondition) + result := r.reconcileMachineState(scope, openStackMachine, machine, machineServer) + if result != nil { + return *result, nil + } + + if !util.IsControlPlaneMachine(machine) { + scope.Logger().Info("Not a Control plane machine, no floating ip reconcile needed, Reconciled Machine create successfully") + return ctrl.Result{}, nil + } + + err = r.reconcileAPIServerLoadBalancer(scope, openStackCluster, openStackMachine, instanceStatus, instanceNS, clusterResourceName) + if err != nil { + return ctrl.Result{}, err } - switch instanceStatus.State() { + conditions.MarkTrue(openStackMachine, infrav1.APIServerIngressReadyCondition) + scope.Logger().Info("Reconciled Machine create successfully") + return ctrl.Result{}, nil +} + +// reconcileMachineState updates the conditions of the OpenStackMachine instance based on the instance state +// and sets the ProviderID 
and Ready fields when the instance is active. +// It returns a reconcile request if the instance is not yet active. +func (r *OpenStackMachineReconciler) reconcileMachineState(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine, openStackServer *infrav1alpha1.OpenStackServer) *ctrl.Result { + switch *openStackServer.Status.InstanceState { case infrav1.InstanceStateActive: - scope.Logger().Info("Machine instance state is ACTIVE", "id", instanceStatus.ID()) + scope.Logger().Info("Machine instance state is ACTIVE", "id", openStackServer.Status.InstanceID) conditions.MarkTrue(openStackMachine, infrav1.InstanceReadyCondition) // Set properties required by CAPI machine controller - openStackMachine.Spec.ProviderID = ptr.To(fmt.Sprintf("openstack:///%s", instanceStatus.ID())) + openStackMachine.Spec.ProviderID = ptr.To(fmt.Sprintf("openstack:///%s", *openStackServer.Status.InstanceID)) openStackMachine.Status.Ready = true case infrav1.InstanceStateError: // If the machine has a NodeRef then it must have been working at some point, // so the error could be something temporary. // If not, it is more likely a configuration error so we set failure and never retry. 
- scope.Logger().Info("Machine instance state is ERROR", "id", instanceStatus.ID()) + scope.Logger().Info("Machine instance state is ERROR", "id", openStackServer.Status.InstanceID) if machine.Status.NodeRef == nil { - err = fmt.Errorf("instance state %q is unexpected", instanceStatus.State()) + err := fmt.Errorf("instance state %v is unexpected", openStackServer.Status.InstanceState) openStackMachine.SetFailure(capierrors.UpdateMachineError, err) } conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStateErrorReason, clusterv1.ConditionSeverityError, "") - return ctrl.Result{}, nil + return &ctrl.Result{} case infrav1.InstanceStateDeleted: // we should avoid further actions for DELETED VM scope.Logger().Info("Machine instance state is DELETED, no actions") conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceDeletedReason, clusterv1.ConditionSeverityError, "") - return ctrl.Result{}, nil + return &ctrl.Result{} case infrav1.InstanceStateBuild, infrav1.InstanceStateUndefined: - scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) - return ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil + scope.Logger().Info("Waiting for instance to become ACTIVE", "id", openStackServer.Status.InstanceID, "status", openStackServer.Status.InstanceState) + return &ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile} default: // The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE // due to potential conflict or unexpected actions - scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) - conditions.MarkUnknown(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %s", instanceStatus.State()) + scope.Logger().Info("Waiting for instance to become 
ACTIVE", "id", openStackServer.Status.InstanceID, "status", openStackServer.Status.InstanceState) + conditions.MarkUnknown(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %v", openStackServer.Status.InstanceState) - return ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile}, nil + return &ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile} } + return nil +} - if !util.IsControlPlaneMachine(machine) { - scope.Logger().Info("Not a Control plane machine, no floating ip reconcile needed, Reconciled Machine create successfully") - return ctrl.Result{}, nil +func (r *OpenStackMachineReconciler) getMachineServer(ctx context.Context, openStackMachine *infrav1.OpenStackMachine) (*infrav1alpha1.OpenStackServer, error) { + machineServer := &infrav1alpha1.OpenStackServer{} + machineServerName := client.ObjectKey{ + Namespace: openStackMachine.Namespace, + Name: openStackMachine.Name, + } + err := r.Client.Get(ctx, machineServerName, machineServer) + if err != nil { + return nil, err } + return machineServer, nil +} - err = r.reconcileAPIServerLoadBalancer(scope, openStackCluster, openStackMachine, instanceStatus, instanceNS, clusterResourceName) +// openStackMachineSpecToOpenStackServerSpec converts an OpenStackMachineSpec to an OpenStackServerSpec. +// It returns the OpenStackServerSpec object and an error if there is any. 
+func openStackMachineSpecToOpenStackServerSpec(openStackMachineSpec *infrav1.OpenStackMachineSpec, identityRef infrav1.OpenStackIdentityReference, tags []string, failureDomain string, userDataRef *corev1.LocalObjectReference, defaultSecGroup *string, defaultNetworkID string) *infrav1alpha1.OpenStackServerSpec { + openStackServerSpec := &infrav1alpha1.OpenStackServerSpec{ + AdditionalBlockDevices: openStackMachineSpec.AdditionalBlockDevices, + ConfigDrive: openStackMachineSpec.ConfigDrive, + Flavor: openStackMachineSpec.Flavor, + IdentityRef: identityRef, + Image: openStackMachineSpec.Image, + RootVolume: openStackMachineSpec.RootVolume, + ServerMetadata: openStackMachineSpec.ServerMetadata, + SSHKeyName: openStackMachineSpec.SSHKeyName, + } + + if len(tags) > 0 { + openStackServerSpec.Tags = tags + } + + if failureDomain != "" { + openStackServerSpec.AvailabilityZone = &failureDomain + } + + if userDataRef != nil { + openStackServerSpec.UserDataRef = userDataRef + } + + if openStackMachineSpec.Trunk { + openStackServerSpec.Trunk = ptr.To(true) + } + + if openStackMachineSpec.FloatingIPPoolRef != nil { + openStackServerSpec.FloatingIPPoolRef = openStackMachineSpec.FloatingIPPoolRef + } + + // If no ports are provided we create one. + // Ports must have a network so if none is provided we use the default network. + serverPorts := openStackMachineSpec.Ports + if len(openStackMachineSpec.Ports) == 0 { + serverPorts = make([]infrav1.PortOpts, 1) + } + for i := range serverPorts { + if serverPorts[i].Network == nil { + serverPorts[i].Network = &infrav1.NetworkParam{ + ID: &defaultNetworkID, + } + } + if len(serverPorts[i].SecurityGroups) == 0 && defaultSecGroup != nil { + serverPorts[i].SecurityGroups = []infrav1.SecurityGroupParam{ + { + ID: defaultSecGroup, + }, + } + } + } + openStackServerSpec.Ports = serverPorts + + return openStackServerSpec +} + +// reconcileMachineServer reconciles the OpenStackServer object for the OpenStackMachine. 
+// It returns the OpenStackServer object and a boolean indicating if the OpenStackServer is ready. +func (r *OpenStackMachineReconciler) reconcileMachineServer(ctx context.Context, scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine) (*infrav1alpha1.OpenStackServer, bool, error) { + var server *infrav1alpha1.OpenStackServer + server, err := r.getOrCreateMachineServer(ctx, openStackCluster, openStackMachine, machine) if err != nil { - return ctrl.Result{}, err + // If an error occurs while getting or creating the OpenStackServer, + // we won't requeue the request so reconcileNormal can add conditions to the OpenStackMachine + // and we can see the error in the logs. + scope.Logger().Error(err, "Failed to get or create OpenStackServer") + return server, false, err + } + if !server.Status.Ready { + scope.Logger().Info("Waiting for OpenStackServer to be ready", "name", server.Name) + return server, true, nil } + return server, false, nil +} - conditions.MarkTrue(openStackMachine, infrav1.APIServerIngressReadyCondition) - scope.Logger().Info("Reconciled Machine create successfully") - return ctrl.Result{}, nil +// getOrCreateMachineServer gets or creates the OpenStackServer object for the OpenStackMachine. +// It returns the OpenStackServer object and an error if there is any. 
+func (r *OpenStackMachineReconciler) getOrCreateMachineServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine) (*infrav1alpha1.OpenStackServer, error) { + if machine.Spec.Bootstrap.DataSecretName == nil { + return nil, errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") + } + userDataRef := &corev1.LocalObjectReference{ + Name: *machine.Spec.Bootstrap.DataSecretName, + } + + var failureDomain string + if machine.Spec.FailureDomain != nil { + failureDomain = *machine.Spec.FailureDomain + } + machineServer, err := r.getMachineServer(ctx, openStackMachine) + + if client.IgnoreNotFound(err) != nil { + return nil, err + } + if apierrors.IsNotFound(err) { + machineServerSpec := openStackMachineSpecToOpenStackServerSpec(&openStackMachine.Spec, openStackCluster.Spec.IdentityRef, compute.InstanceTags(&openStackMachine.Spec, openStackCluster), failureDomain, userDataRef, getManagedSecurityGroup(openStackCluster, machine), openStackCluster.Status.Network.ID) + machineServer = &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + clusterv1.ClusterNameLabel: openStackCluster.Labels[clusterv1.ClusterNameLabel], + }, + Name: openStackMachine.Name, + Namespace: openStackMachine.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: openStackMachine.APIVersion, + Kind: openStackMachine.Kind, + Name: openStackMachine.Name, + UID: openStackMachine.UID, + }, + }, + }, + Spec: *machineServerSpec, + } + + if err := r.Client.Create(ctx, machineServer); err != nil { + return nil, fmt.Errorf("failed to create machine server: %w", err) + } + } + return machineServer, nil } func (r *OpenStackMachineReconciler) reconcileAPIServerLoadBalancer(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceStatus *compute.InstanceStatus, instanceNS 
*compute.InstanceNetworkStatus, clusterResourceName string) error { @@ -741,109 +643,6 @@ func (r *OpenStackMachineReconciler) reconcileAPIServerLoadBalancer(scope *scope return nil } -func getOrCreateMachinePorts(openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service) error { - resolved := openStackMachine.Status.Resolved - if resolved == nil { - return errors.New("machine resolved is nil") - } - resources := openStackMachine.Status.Resources - if resources == nil { - return errors.New("machine resources is nil") - } - desiredPorts := resolved.Ports - - if len(desiredPorts) == len(resources.Ports) { - return nil - } - - if err := networkingService.CreatePorts(openStackMachine, desiredPorts, resources); err != nil { - return fmt.Errorf("creating ports: %w", err) - } - - return nil -} - -func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string, portIDs []string) (*compute.InstanceStatus, error) { - var instanceStatus *compute.InstanceStatus - var err error - if openStackMachine.Status.InstanceID != nil { - instanceStatus, err = computeService.GetInstanceStatus(*openStackMachine.Status.InstanceID) - if err != nil { - logger.Info("Unable to get OpenStack instance", "name", openStackMachine.Name) - conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.OpenStackErrorReason, clusterv1.ConditionSeverityError, err.Error()) - return nil, err - } - } - if instanceStatus == nil { - // Check if there is an existing instance with machine name, in case where instance ID would not have been stored in machine status - instanceStatus, err = computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) - if err != nil { - logger.Info("Unable to get OpenStack instance by name", "name", openStackMachine.Name) - 
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) - return nil, err - } - if instanceStatus != nil { - return instanceStatus, nil - } - if openStackMachine.Status.InstanceID != nil { - logger.Info("Not reconciling machine in failed state. The previously existing OpenStack instance is no longer available") - conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists") - openStackMachine.SetFailure(capierrors.UpdateMachineError, errors.New("virtual machine no longer exists")) - return nil, nil - } - - instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData) - if err != nil { - return nil, err - } - - logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name) - instanceStatus, err = computeService.CreateInstance(openStackMachine, instanceSpec, portIDs) - if err != nil { - conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) - return nil, fmt.Errorf("create OpenStack instance: %w", err) - } - } - return instanceStatus, nil -} - -func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, userData string) (*compute.InstanceSpec, error) { - resolved := openStackMachine.Status.Resolved - if resolved == nil { - return nil, errors.New("machine resolved is nil") - } - - serverMetadata := make(map[string]string, len(openStackMachine.Spec.ServerMetadata)) - for i := range openStackMachine.Spec.ServerMetadata { - key := openStackMachine.Spec.ServerMetadata[i].Key - value := openStackMachine.Spec.ServerMetadata[i].Value - serverMetadata[key] = value - } - - instanceSpec := compute.InstanceSpec{ - Name: 
openStackMachine.Name, - ImageID: resolved.ImageID, - Flavor: openStackMachine.Spec.Flavor, - SSHKeyName: openStackMachine.Spec.SSHKeyName, - UserData: userData, - Metadata: serverMetadata, - ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, - RootVolume: openStackMachine.Spec.RootVolume, - AdditionalBlockDevices: openStackMachine.Spec.AdditionalBlockDevices, - ServerGroupID: resolved.ServerGroupID, - Trunk: openStackMachine.Spec.Trunk, - } - - // Add the failure domain only if specified - if machine.Spec.FailureDomain != nil { - instanceSpec.FailureDomain = *machine.Spec.FailureDomain - } - - instanceSpec.Tags = compute.InstanceTags(&openStackMachine.Spec, openStackCluster) - - return &instanceSpec, nil -} - // getManagedSecurityGroup returns the ID of the security group managed by the // OpenStackCluster whether it's a control plane or a worker machine. func getManagedSecurityGroup(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine) *string { @@ -851,6 +650,10 @@ func getManagedSecurityGroup(openStackCluster *infrav1.OpenStackCluster, machine return nil } + if machine == nil { + return nil + } + if util.IsControlPlaneMachine(machine) { if openStackCluster.Status.ControlPlaneSecurityGroup != nil { return &openStackCluster.Status.ControlPlaneSecurityGroup.ID @@ -906,25 +709,6 @@ func (r *OpenStackMachineReconciler) OpenStackClusterToOpenStackMachines(ctx con } } -func (r *OpenStackMachineReconciler) getBootstrapData(ctx context.Context, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (string, error) { - if machine.Spec.Bootstrap.DataSecretName == nil { - return "", errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil") - } - - secret := &corev1.Secret{} - key := types.NamespacedName{Namespace: machine.Namespace, Name: *machine.Spec.Bootstrap.DataSecretName} - if err := r.Client.Get(ctx, key, secret); err != nil { - return "", 
fmt.Errorf("failed to retrieve bootstrap data secret for Openstack Machine %s/%s: %w", machine.Namespace, openStackMachine.Name, err) - } - - value, ok := secret.Data["value"] - if !ok { - return "", errors.New("error retrieving bootstrap data: secret value key is missing") - } - - return base64.StdEncoding.EncodeToString(value), nil -} - func (r *OpenStackMachineReconciler) requeueOpenStackMachinesForUnpausedCluster(ctx context.Context) handler.MapFunc { log := ctrl.LoggerFrom(ctx) return func(ctx context.Context, o client.Object) []ctrl.Request { diff --git a/controllers/openstackmachine_controller_test.go b/controllers/openstackmachine_controller_test.go index 5212ca4944..ed7505af76 100644 --- a/controllers/openstackmachine_controller_test.go +++ b/controllers/openstackmachine_controller_test.go @@ -17,29 +17,14 @@ limitations under the License. package controllers import ( - "fmt" "reflect" "testing" - "github.com/go-logr/logr/testr" - "github.com/google/go-cmp/cmp" - "github.com/gophercloud/gophercloud/v2" - "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/v2/openstack/image/v2/images" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks" - "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" - . 
"github.com/onsi/gomega" //nolint:revive - "go.uber.org/mock/gomock" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) const ( @@ -58,124 +43,71 @@ const ( failureDomain = "test-failure-domain" ) -func getDefaultOpenStackCluster() *infrav1.OpenStackCluster { - return &infrav1.OpenStackCluster{ - Spec: infrav1.OpenStackClusterSpec{}, +func TestOpenStackMachineSpecToOpenStackServerSpec(t *testing.T) { + identityRef := infrav1.OpenStackIdentityReference{ + Name: "foo", + CloudName: "my-cloud", + } + openStackCluster := &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ManagedSecurityGroups: &infrav1.ManagedSecurityGroups{}, + }, Status: infrav1.OpenStackClusterStatus{ + WorkerSecurityGroup: &infrav1.SecurityGroupStatus{ + ID: workerSecurityGroupUUID, + }, Network: &infrav1.NetworkStatusWithSubnets{ NetworkStatus: infrav1.NetworkStatus{ ID: networkUUID, }, - Subnets: []infrav1.Subnet{ - {ID: subnetUUID}, - }, }, - ControlPlaneSecurityGroup: &infrav1.SecurityGroupStatus{ID: controlPlaneSecurityGroupUUID}, - WorkerSecurityGroup: &infrav1.SecurityGroupStatus{ID: workerSecurityGroupUUID}, }, } -} - -func getDefaultMachine() *clusterv1.Machine { - return &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - FailureDomain: ptr.To(failureDomain), - }, - } -} - -func getDefaultOpenStackMachine() *infrav1.OpenStackMachine { - return &infrav1.OpenStackMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: openStackMachineName, - Namespace: namespace, - }, - Spec: infrav1.OpenStackMachineSpec{ - // ProviderID is set by the controller - // 
InstanceID is set by the controller - // FloatingIP is only used by the cluster controller for the Bastion - // TODO: Test Networks, Ports, Subnet, and Trunk separately - Flavor: flavorName, - Image: infrav1.ImageParam{ID: ptr.To(imageUUID)}, - SSHKeyName: sshKeyName, - Tags: []string{"test-tag"}, - ServerMetadata: []infrav1.ServerMetadata{ - {Key: "test-metadata", Value: "test-value"}, + portOpts := []infrav1.PortOpts{ + { + Network: &infrav1.NetworkParam{ + ID: ptr.To(openStackCluster.Status.Network.ID), }, - ConfigDrive: ptr.To(true), - SecurityGroups: []infrav1.SecurityGroupParam{}, - ServerGroup: &infrav1.ServerGroupParam{ID: ptr.To(serverGroupUUID)}, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - ServerGroupID: serverGroupUUID, + SecurityGroups: []infrav1.SecurityGroupParam{ + { + ID: ptr.To(openStackCluster.Status.WorkerSecurityGroup.ID), + }, }, }, } -} - -func getDefaultInstanceSpec() *compute.InstanceSpec { - return &compute.InstanceSpec{ - Name: openStackMachineName, - ImageID: imageUUID, - Flavor: flavorName, - SSHKeyName: sshKeyName, - UserData: "user-data", - Metadata: map[string]string{ - "test-metadata": "test-value", - }, - ConfigDrive: *ptr.To(true), - FailureDomain: *ptr.To(failureDomain), - ServerGroupID: serverGroupUUID, - Tags: []string{"test-tag"}, - } -} - -func Test_machineToInstanceSpec(t *testing.T) { - RegisterTestingT(t) - + image := infrav1.ImageParam{Filter: &infrav1.ImageFilter{Name: ptr.To("my-image")}} + tags := []string{"tag1", "tag2"} + userData := &corev1.LocalObjectReference{Name: "server-data-secret"} tests := []struct { - name string - openStackCluster func() *infrav1.OpenStackCluster - machine func() *clusterv1.Machine - openStackMachine func() *infrav1.OpenStackMachine - wantInstanceSpec func() *compute.InstanceSpec + name string + spec *infrav1.OpenStackMachineSpec + want *infrav1alpha1.OpenStackServerSpec }{ { - name: "Defaults", - openStackCluster: 
getDefaultOpenStackCluster, - machine: getDefaultMachine, - openStackMachine: getDefaultOpenStackMachine, - wantInstanceSpec: getDefaultInstanceSpec, - }, - { - name: "Tags", - openStackCluster: func() *infrav1.OpenStackCluster { - c := getDefaultOpenStackCluster() - c.Spec.Tags = []string{"cluster-tag", "duplicate-tag"} - return c + name: "Test a minimum OpenStackMachineSpec to OpenStackServerSpec conversion", + spec: &infrav1.OpenStackMachineSpec{ + Flavor: flavorName, + Image: image, + SSHKeyName: sshKeyName, }, - machine: getDefaultMachine, - openStackMachine: func() *infrav1.OpenStackMachine { - m := getDefaultOpenStackMachine() - m.Spec.Tags = []string{"machine-tag", "duplicate-tag"} - return m - }, - wantInstanceSpec: func() *compute.InstanceSpec { - i := getDefaultInstanceSpec() - i.Tags = []string{"machine-tag", "duplicate-tag", "cluster-tag"} - return i + want: &infrav1alpha1.OpenStackServerSpec{ + Flavor: flavorName, + IdentityRef: identityRef, + Image: image, + SSHKeyName: sshKeyName, + Ports: portOpts, + Tags: tags, + UserDataRef: userData, }, }, } - for _, tt := range tests { + for i := range tests { + tt := tests[i] t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - got, _ := machineToInstanceSpec(tt.openStackCluster(), tt.machine(), tt.openStackMachine(), "user-data") - wanted := tt.wantInstanceSpec() - - g.Expect(got).To(Equal(wanted), cmp.Diff(got, wanted)) + spec := openStackMachineSpecToOpenStackServerSpec(tt.spec, identityRef, tags, "", userData, &openStackCluster.Status.WorkerSecurityGroup.ID, openStackCluster.Status.Network.ID) + if !reflect.DeepEqual(spec, tt.want) { + t.Errorf("openStackMachineSpecToOpenStackServerSpec() got = %+v, want %+v", spec, tt.want) + } }) } } @@ -218,354 +150,3 @@ func TestGetPortIDs(t *testing.T) { }) } } - -func Test_reconcileDelete(t *testing.T) { - const ( - instanceUUID = "8308882f-5e46-47e6-8e12-1fe869c43d1d" - portUUID = "55eac199-4836-4a98-b31c-9f65f382ad46" - rootVolumeUUID = 
"4724a66d-bd5e-47f3-bb57-a67fcb4168e0" - trunkUUID = "9d348baa-93b1-4e63-932f-dd0527fbd789" - - imageName = "my-image" - ) - - // ******************* - // START OF TEST CASES - // ******************* - - type recorders struct { - compute *mock.MockComputeClientMockRecorder - image *mock.MockImageClientMockRecorder - network *mock.MockNetworkClientMockRecorder - volume *mock.MockVolumeClientMockRecorder - } - - defaultImage := infrav1.ImageParam{ - Filter: &infrav1.ImageFilter{ - Name: ptr.To(imageName), - }, - } - - defaultResolvedPorts := []infrav1.ResolvedPortSpec{ - { - Name: openStackMachineName + "-0", - Description: "my test port", - NetworkID: networkUUID, - }, - } - defaultPortsStatus := []infrav1.PortStatus{ - { - ID: portUUID, - }, - } - - deleteDefaultPorts := func(r *recorders) { - trunkExtension := extensions.Extension{} - trunkExtension.Alias = "trunk" - r.network.ListExtensions().Return([]extensions.Extension{trunkExtension}, nil) - r.network.ListTrunk(trunks.ListOpts{PortID: portUUID}).Return([]trunks.Trunk{{ID: trunkUUID}}, nil) - r.network.ListTrunkSubports(trunkUUID).Return([]trunks.Subport{}, nil) - r.network.DeleteTrunk(trunkUUID).Return(nil) - r.network.DeletePort(portUUID).Return(nil) - } - - deleteServerByID := func(r *recorders) { - r.compute.GetServer(instanceUUID).Return(&servers.Server{ - ID: instanceUUID, - Name: openStackMachineName, - }, nil) - r.compute.DeleteServer(instanceUUID).Return(nil) - r.compute.GetServer(instanceUUID).Return(nil, gophercloud.ErrUnexpectedResponseCode{Actual: 404}) - } - deleteServerByName := func(r *recorders) { - r.compute.ListServers(servers.ListOpts{ - Name: "^" + openStackMachineName + "$", - }).Return([]servers.Server{ - { - ID: instanceUUID, - Name: openStackMachineName, - }, - }, nil) - r.compute.DeleteServer(instanceUUID).Return(nil) - r.compute.GetServer(instanceUUID).Return(nil, gophercloud.ErrUnexpectedResponseCode{Actual: 404}) - } - - deleteMissingServerByName := func(r *recorders) { - // Lookup 
server by name because it is not in status. - // Don't find it. - r.compute.ListServers(servers.ListOpts{ - Name: "^" + openStackMachineName + "$", - }).Return([]servers.Server{}, nil) - } - - deleteRootVolume := func(r *recorders) { - // Fetch volume by name - volumeName := fmt.Sprintf("%s-root", openStackMachineName) - r.volume.ListVolumes(volumes.ListOpts{ - AllTenants: false, - Name: volumeName, - TenantID: "", - }).Return([]volumes.Volume{{ - ID: rootVolumeUUID, - Name: volumeName, - }}, nil) - - // Delete volume - r.volume.DeleteVolume(rootVolumeUUID, volumes.DeleteOpts{}).Return(nil) - } - - adoptExistingPorts := func(r *recorders) { - r.network.ListPort(ports.ListOpts{ - NetworkID: networkUUID, - Name: openStackMachineName + "-0", - }).Return([]ports.Port{{ID: portUUID}}, nil) - } - - resolveImage := func(r *recorders) { - r.image.ListImages(images.ListOpts{ - Name: imageName, - }).Return([]images.Image{{ID: imageUUID}}, nil) - } - - tests := []struct { - name string - osMachine infrav1.OpenStackMachine - expect func(r *recorders) - wantErr bool - wantRemoveFinalizer bool - clusterNotReady bool - }{ - { - name: "No volumes, resolved and resources populated", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - InstanceID: ptr.To(instanceUUID), - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - deleteServerByID(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - name: "Root volume, resolved and resources populated", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - RootVolume: &infrav1.RootVolume{ - SizeGiB: 50, - }, - }, - Status: infrav1.OpenStackMachineStatus{ - InstanceID: ptr.To(instanceUUID), - Resolved: 
&infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - // Server exists, so we don't delete root volume explicitly - deleteServerByID(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - name: "Root volume, machine not created, resolved and resources populated", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - RootVolume: &infrav1.RootVolume{ - SizeGiB: 50, - }, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - deleteMissingServerByName(r) - deleteRootVolume(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - // N.B. The 'no resolved but resource exist' case can - // only happen across an upgrade. At some point in the - // future we should stop handling it. - name: "No volumes, no resolved or resources, instance exists", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - // Unlike resolved and resources, - // instanceID will have been converted - // from the previous API version. - InstanceID: ptr.To(instanceUUID), - }, - }, - expect: func(r *recorders) { - resolveImage(r) - adoptExistingPorts(r) - deleteServerByID(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - // This is an upgrade case because from v0.10 onwards - // we don't add the finalizer until we add resolved, so - // this can no longer occur. This will stop working when - // we remove handling for empty resolved on delete. 
- name: "Invalid image, no resolved or resources", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - }, - expect: func(r *recorders) { - r.image.ListImages(images.ListOpts{Name: imageName}).Return([]images.Image{}, nil) - }, - wantErr: true, - wantRemoveFinalizer: true, - }, - { - name: "No instance id, server and ports exist", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - Resources: &infrav1.MachineResources{ - Ports: defaultPortsStatus, - }, - }, - }, - expect: func(r *recorders) { - deleteServerByName(r) - deleteDefaultPorts(r) - }, - wantRemoveFinalizer: true, - }, - { - name: "Adopt ports error should fail deletion and retry", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - Status: infrav1.OpenStackMachineStatus{ - Resolved: &infrav1.ResolvedMachineSpec{ - ImageID: imageUUID, - Ports: defaultResolvedPorts, - }, - }, - }, - expect: func(r *recorders) { - r.network.ListPort(ports.ListOpts{ - NetworkID: networkUUID, - Name: openStackMachineName + "-0", - }).Return(nil, fmt.Errorf("error adopting ports")) - }, - wantErr: true, - wantRemoveFinalizer: false, - }, - { - // This is an upgrade case because from v0.10 onwards we - // should not have added the finalizer until the cluster - // is ready. 
- name: "Cluster not ready should remove finalizer", - osMachine: infrav1.OpenStackMachine{ - Spec: infrav1.OpenStackMachineSpec{ - Image: defaultImage, - }, - }, - clusterNotReady: true, - wantRemoveFinalizer: true, - }, - } - for i := range tests { - tt := &tests[i] - t.Run(tt.name, func(t *testing.T) { - g := NewGomegaWithT(t) - log := testr.New(t) - - mockCtrl := gomock.NewController(t) - mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "") - - reconciler := OpenStackMachineReconciler{} - - computeRecorder := mockScopeFactory.ComputeClient.EXPECT() - imageRecorder := mockScopeFactory.ImageClient.EXPECT() - networkRecorder := mockScopeFactory.NetworkClient.EXPECT() - volumeRecorder := mockScopeFactory.VolumeClient.EXPECT() - - if tt.expect != nil { - tt.expect(&recorders{computeRecorder, imageRecorder, networkRecorder, volumeRecorder}) - } - scopeWithLogger := scope.NewWithLogger(mockScopeFactory, log) - - openStackCluster := infrav1.OpenStackCluster{} - openStackCluster.Status.Ready = !tt.clusterNotReady - openStackCluster.Status.Network = &infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - Name: "my-network", - ID: networkUUID, - }, - Subnets: []infrav1.Subnet{ - { - Name: "my-subnet", - ID: subnetUUID, - CIDR: "192.168.0.0/24", - }, - }, - } - - machine := clusterv1.Machine{} - - osMachine := &tt.osMachine - osMachine.Name = openStackMachineName - osMachine.Finalizers = []string{infrav1.MachineFinalizer} - - _, err := reconciler.reconcileDelete(scopeWithLogger, openStackMachineName, &openStackCluster, &machine, &tt.osMachine) - - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - if tt.wantRemoveFinalizer { - g.Expect(osMachine.Finalizers).To(BeEmpty()) - } else { - g.Expect(osMachine.Finalizers).To(ConsistOf(infrav1.MachineFinalizer)) - } - }) - } -} diff --git a/controllers/openstackserver_controller.go b/controllers/openstackserver_controller.go new file mode 100644 
index 0000000000..02766612b1 --- /dev/null +++ b/controllers/openstackserver_controller.go @@ -0,0 +1,617 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/names" +) + +const 
( + SpecHashAnnotation = "infrastructure.cluster.x-k8s.io/spec-hash" +) + +// OpenStackServerReconciler reconciles a OpenStackServer object. +type OpenStackServerReconciler struct { + Client client.Client + Recorder record.EventRecorder + WatchFilterValue string + ScopeFactory scope.Factory + CaCertificates []byte // PEM encoded ca certificates. + + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackservers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackservers/status,verbs=get;update;patch + +func (r *OpenStackServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + + // Fetch the OpenStackServer instance. + openStackServer := &infrav1alpha1.OpenStackServer{} + err := r.Client.Get(ctx, req.NamespacedName, openStackServer) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + clientScope, err := r.ScopeFactory.NewClientScopeFromObject(ctx, r.Client, r.CaCertificates, log, openStackServer) + if err != nil { + return reconcile.Result{}, err + } + scope := scope.NewWithLogger(clientScope, log) + + scope.Logger().Info("Reconciling OpenStackServer") + + cluster, err := getClusterFromMetadata(ctx, r.Client, openStackServer.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster != nil { + if annotations.IsPaused(cluster, openStackServer) { + scope.Logger().Info("OpenStackServer %s/%s linked to a Cluster that is paused. 
Won't reconcile", openStackServer.Namespace, openStackServer.Name) + return reconcile.Result{}, nil + } + } + + patchHelper, err := patch.NewHelper(openStackServer, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + defer func() { + if err := patchServer(ctx, patchHelper, openStackServer); err != nil { + result = ctrl.Result{} + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + if !openStackServer.ObjectMeta.DeletionTimestamp.IsZero() { + return reconcile.Result{}, r.reconcileDelete(scope, openStackServer) + } + + return r.reconcileNormal(ctx, scope, openStackServer) +} + +func patchServer(ctx context.Context, patchHelper *patch.Helper, openStackServer *infrav1alpha1.OpenStackServer, options ...patch.Option) error { + // Always update the readyCondition by summarizing the state of other conditions. + applicableConditions := []clusterv1.ConditionType{ + infrav1.InstanceReadyCondition, + } + + conditions.SetSummary(openStackServer, conditions.WithConditions(applicableConditions...)) + + // Patch the object, ignoring conflicts on the conditions owned by this controller. + // Also, if requested, we are adding additional options like e.g. Patch ObservedGeneration when issuing the + // patch at the end of the reconcile loop. + options = append(options, + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + clusterv1.ReadyCondition, + infrav1.InstanceReadyCondition, + }}, + ) + conditions.SetSummary(openStackServer, + conditions.WithConditions( + infrav1.InstanceReadyCondition, + ), + ) + + return patchHelper.Patch(ctx, openStackServer, options...) +} + +func (r *OpenStackServerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1alpha1.OpenStackServer{}). 
+ Complete(r) +} + +func (r *OpenStackServerReconciler) reconcileDelete(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) error { + scope.Logger().Info("Reconciling Server delete") + + computeService, err := compute.NewService(scope) + if err != nil { + return err + } + + networkingService, err := networking.NewService(scope) + if err != nil { + return err + } + + // Check for any orphaned resources + // N.B. Unlike resolveServerResources, we must always look for orphaned resources in the delete path. + if err := adoptServerResources(scope, openStackServer); err != nil { + return fmt.Errorf("adopting server resources: %w", err) + } + + instanceStatus, err := getServerStatus(openStackServer, computeService) + if err != nil { + return err + } + + // If no instance was created we currently need to check for orphaned volumes. + if instanceStatus == nil { + if err := computeService.DeleteVolumes(openStackServer.Name, openStackServer.Spec.RootVolume, openStackServer.Spec.AdditionalBlockDevices); err != nil { + return fmt.Errorf("delete volumes: %w", err) + } + } else { + if err := computeService.DeleteInstance(openStackServer, instanceStatus); err != nil { + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceDeleteFailedReason, clusterv1.ConditionSeverityError, "Deleting instance failed: %v", err) + return fmt.Errorf("delete instance: %w", err) + } + } + + trunkSupported, err := networkingService.IsTrunkExtSupported() + if err != nil { + return err + } + + if openStackServer.Status.Resources != nil { + portsStatus := openStackServer.Status.Resources.Ports + for _, port := range portsStatus { + if err := networkingService.DeleteInstanceTrunkAndPort(openStackServer, port, trunkSupported); err != nil { + return fmt.Errorf("failed to delete port %q: %w", port.ID, err) + } + } + } + + if err := r.reconcileDeleteFloatingAddressFromPool(scope, openStackServer); err != nil { + return err + } + + 
controllerutil.RemoveFinalizer(openStackServer, infrav1alpha1.OpenStackServerFinalizer) + scope.Logger().Info("Reconciled Server deleted successfully") + return nil +} + +func (r *OpenStackServerReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (_ ctrl.Result, reterr error) { + // If the OpenStackServer is in an error state, return early. + if openStackServer.Status.InstanceState != nil && *openStackServer.Status.InstanceState == infrav1.InstanceStateError { + scope.Logger().Info("Not reconciling server in error state. See openStackServer.status or previously logged error for details") + return ctrl.Result{}, nil + } + + scope.Logger().Info("Reconciling Server create") + + changed, err := resolveServerResources(scope, openStackServer) + if err != nil { + return ctrl.Result{}, err + } + + // Also add the finalizer when writing resolved resources so we can start creating resources on the next reconcile. + if controllerutil.AddFinalizer(openStackServer, infrav1alpha1.OpenStackServerFinalizer) { + changed = true + } + + // We requeue if we either added the finalizer or resolved server + // resources. This means that we never create any resources unless we + // have observed that the finalizer and resolved server resources were + // successfully written in a previous transaction. This in turn means + // that in the delete path we can be sure that if there are no resolved + // resources then no resources were created. 
+ if changed { + scope.Logger().V(6).Info("Server resources updated, requeuing") + return ctrl.Result{}, nil + } + + // Check for orphaned resources previously created but not written to the status + if err := adoptServerResources(scope, openStackServer); err != nil { + return ctrl.Result{}, fmt.Errorf("adopting server resources: %w", err) + } + computeService, err := compute.NewService(scope) + if err != nil { + return ctrl.Result{}, err + } + networkingService, err := networking.NewService(scope) + if err != nil { + return ctrl.Result{}, err + } + + floatingAddressClaim, waitingForFloatingAddress, err := r.reconcileFloatingAddressFromPool(ctx, scope, openStackServer) + if err != nil || waitingForFloatingAddress { + return ctrl.Result{}, err + } + + err = getOrCreateServerPorts(openStackServer, networkingService) + if err != nil { + return ctrl.Result{}, err + } + portIDs := GetPortIDs(openStackServer.Status.Resources.Ports) + + instanceStatus, err := r.getOrCreateServer(ctx, scope.Logger(), openStackServer, computeService, portIDs) + if err != nil || instanceStatus == nil { + // Conditions set in getOrCreateInstance + return ctrl.Result{}, err + } + + instanceNS, err := instanceStatus.NetworkStatus() + if err != nil { + return ctrl.Result{}, fmt.Errorf("get network status: %w", err) + } + + if floatingAddressClaim != nil { + if err := r.associateIPAddressFromIPAddressClaim(ctx, openStackServer, instanceStatus, instanceNS, floatingAddressClaim, networkingService); err != nil { + conditions.MarkFalse(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Failed while associating ip from pool: %v", err) + return ctrl.Result{}, err + } + conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition) + } + + state := instanceStatus.State() + openStackServer.Status.InstanceID = ptr.To(instanceStatus.ID()) + openStackServer.Status.InstanceState = &state + + 
switch instanceStatus.State() { + case infrav1.InstanceStateActive: + scope.Logger().Info("Server instance state is ACTIVE", "id", instanceStatus.ID()) + conditions.MarkTrue(openStackServer, infrav1.InstanceReadyCondition) + openStackServer.Status.Ready = true + case infrav1.InstanceStateError: + scope.Logger().Info("Server instance state is ERROR", "id", instanceStatus.ID()) + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceStateErrorReason, clusterv1.ConditionSeverityError, "") + return ctrl.Result{}, nil + case infrav1.InstanceStateDeleted: + // we should avoid further actions for DELETED VM + scope.Logger().Info("Server instance state is DELETED, no actions") + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceDeletedReason, clusterv1.ConditionSeverityError, "") + return ctrl.Result{}, nil + case infrav1.InstanceStateBuild, infrav1.InstanceStateUndefined: + scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) + return ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil + default: + // The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE + // due to potential conflict or unexpected actions + scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State()) + conditions.MarkUnknown(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %s", instanceStatus.State()) + + return ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile}, nil + } + + scope.Logger().Info("Reconciled Server create successfully") + return ctrl.Result{}, nil +} + +// resolveServerResources resolves and stores the OpenStack resources for the server. 
+func resolveServerResources(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (bool, error) { + if openStackServer.Status.Resources == nil { + openStackServer.Status.Resources = &infrav1alpha1.ServerResources{} + } + resolved := openStackServer.Status.Resolved + if resolved == nil { + resolved = &infrav1alpha1.ResolvedServerSpec{} + openStackServer.Status.Resolved = resolved + } + return compute.ResolveServerSpec(scope, openStackServer) +} + +// adoptServerResources adopts the OpenStack resources for the server. +func adoptServerResources(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) error { + resources := openStackServer.Status.Resources + if resources == nil { + resources = &infrav1alpha1.ServerResources{} + openStackServer.Status.Resources = resources + } + + // Adopt any existing resources + return compute.AdoptServerResources(scope, openStackServer.Status.Resolved, resources) +} + +func getOrCreateServerPorts(openStackServer *infrav1alpha1.OpenStackServer, networkingService *networking.Service) error { + resolved := openStackServer.Status.Resolved + if resolved == nil { + return errors.New("server status resolved is nil") + } + resources := openStackServer.Status.Resources + if resources == nil { + return errors.New("server status resources is nil") + } + desiredPorts := resolved.Ports + + if len(desiredPorts) == len(resources.Ports) { + return nil + } + + if err := networkingService.CreatePorts(openStackServer, desiredPorts, resources); err != nil { + return fmt.Errorf("creating ports: %w", err) + } + + return nil +} + +// getOrCreateServer gets or creates a server instance and returns the instance status, or an error. 
+func (r *OpenStackServerReconciler) getOrCreateServer(ctx context.Context, logger logr.Logger, openStackServer *infrav1alpha1.OpenStackServer, computeService *compute.Service, portIDs []string) (*compute.InstanceStatus, error) { + var instanceStatus *compute.InstanceStatus + var err error + + if openStackServer.Status.InstanceID != nil { + instanceStatus, err = computeService.GetInstanceStatus(*openStackServer.Status.InstanceID) + if err != nil { + logger.Info("Unable to get OpenStack instance", "name", openStackServer.Name) + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.OpenStackErrorReason, clusterv1.ConditionSeverityError, err.Error()) + return nil, err + } + } + if instanceStatus == nil { + // Check if there is an existing instance with machine name, in case where instance ID would not have been stored in machine status + instanceStatus, err := computeService.GetInstanceStatusByName(openStackServer, openStackServer.Name) + if err != nil { + logger.Error(err, "Failed to get instance by name", "name", openStackServer.Name) + return nil, err + } + if instanceStatus != nil { + logger.Info("Server already exists", "name", openStackServer.Name, "id", instanceStatus.ID()) + return instanceStatus, nil + } + if openStackServer.Status.InstanceID != nil { + logger.Info("Not reconciling server in failed state. 
The previously existing OpenStack instance is no longer available") + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists") + return nil, nil + } + + logger.Info("Server does not exist, creating Server", "name", openStackServer.Name) + instanceSpec, err := r.serverToInstanceSpec(ctx, openStackServer) + if err != nil { + return nil, err + } + instanceSpec.Name = openStackServer.Name + instanceStatus, err = computeService.CreateInstance(openStackServer, instanceSpec, portIDs) + if err != nil { + conditions.MarkFalse(openStackServer, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) + openStackServer.Status.InstanceState = &infrav1.InstanceStateError + return nil, fmt.Errorf("create OpenStack instance: %w", err) + } + return instanceStatus, nil + } + return instanceStatus, nil +} + +func (r *OpenStackServerReconciler) getUserDataSecretValue(ctx context.Context, namespace, secretName string) (string, error) { + secret := &corev1.Secret{} + key := types.NamespacedName{Namespace: namespace, Name: secretName} + if err := r.Client.Get(ctx, key, secret); err != nil { + return "", fmt.Errorf("failed to get secret %s/%s: %w", namespace, secretName, err) + } + + value, ok := secret.Data["value"] + if !ok { + return "", fmt.Errorf("secret %s/%s does not contain userData", namespace, secretName) + } + + return base64.StdEncoding.EncodeToString(value), nil +} + +func (r *OpenStackServerReconciler) serverToInstanceSpec(ctx context.Context, openStackServer *infrav1alpha1.OpenStackServer) (*compute.InstanceSpec, error) { + resolved := openStackServer.Status.Resolved + if resolved == nil { + return nil, errors.New("server resolved is nil") + } + + serverMetadata := make(map[string]string, len(openStackServer.Spec.ServerMetadata)) + for i := range openStackServer.Spec.ServerMetadata { + key := 
openStackServer.Spec.ServerMetadata[i].Key + value := openStackServer.Spec.ServerMetadata[i].Value + serverMetadata[key] = value + } + + instanceSpec := &compute.InstanceSpec{ + AdditionalBlockDevices: openStackServer.Spec.AdditionalBlockDevices, + ConfigDrive: openStackServer.Spec.ConfigDrive != nil && *openStackServer.Spec.ConfigDrive, + Flavor: openStackServer.Spec.Flavor, + ImageID: resolved.ImageID, + Metadata: serverMetadata, + Name: openStackServer.Name, + RootVolume: openStackServer.Spec.RootVolume, + SSHKeyName: openStackServer.Spec.SSHKeyName, + ServerGroupID: resolved.ServerGroupID, + Tags: openStackServer.Spec.Tags, + Trunk: openStackServer.Spec.Trunk != nil && *openStackServer.Spec.Trunk, + } + + if openStackServer.Spec.UserDataRef != nil { + userData, err := r.getUserDataSecretValue(ctx, openStackServer.Namespace, openStackServer.Spec.UserDataRef.Name) + if err != nil { + return nil, fmt.Errorf("failed to get user data secret value") + } + instanceSpec.UserData = userData + } + + if openStackServer.Spec.AvailabilityZone != nil { + instanceSpec.FailureDomain = *openStackServer.Spec.AvailabilityZone + } + + return instanceSpec, nil +} + +func getServerStatus(openStackServer *infrav1alpha1.OpenStackServer, computeService *compute.Service) (*compute.InstanceStatus, error) { + if openStackServer.Status.InstanceID != nil { + return computeService.GetInstanceStatus(*openStackServer.Status.InstanceID) + } + return computeService.GetInstanceStatusByName(openStackServer, openStackServer.Name) +} + +// getClusterFromMetadata returns the Cluster object (if present) using the object metadata. +// This function was copied from the cluster-api project but manages errors differently. +func getClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + // If the object is unlabeled, return early with no error. + // It's fine for this object to not be part of a cluster. 
+ if obj.Labels[clusterv1.ClusterNameLabel] == "" { + return nil, nil + } + // At this point, the object has a cluster name label so we should be able to find the cluster + // and return an error if we can't. + return util.GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.ClusterNameLabel]) +} + +// reconcileFloatingAddressFromPool reconciles the floating IP address from the pool. +// It returns the IPAddressClaim and a boolean indicating if the IPAddressClaim is ready. +func (r *OpenStackServerReconciler) reconcileFloatingAddressFromPool(ctx context.Context, scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (*ipamv1.IPAddressClaim, bool, error) { + if openStackServer.Spec.FloatingIPPoolRef == nil { + return nil, false, nil + } + var claim *ipamv1.IPAddressClaim + claim, err := r.getOrCreateIPAddressClaimForFloatingAddress(ctx, scope, openStackServer) + if err != nil { + conditions.MarkFalse(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityInfo, "Failed to reconcile floating IP claims: %v", err) + return nil, true, err + } + if claim.Status.AddressRef.Name == "" { + r.Recorder.Eventf(openStackServer, corev1.EventTypeNormal, "WaitingForIPAddressClaim", "Waiting for IPAddressClaim %s/%s to be allocated", claim.Namespace, claim.Name) + return claim, true, nil + } + conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition) + return claim, false, nil +} + +// createIPAddressClaim creates IPAddressClaim for the FloatingAddressFromPool if it does not exist yet. 
+func (r *OpenStackServerReconciler) getOrCreateIPAddressClaimForFloatingAddress(ctx context.Context, scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (*ipamv1.IPAddressClaim, error) { + var err error + + poolRef := openStackServer.Spec.FloatingIPPoolRef + claimName := names.GetFloatingAddressClaimName(openStackServer.Name) + claim := &ipamv1.IPAddressClaim{} + + err = r.Client.Get(ctx, client.ObjectKey{Namespace: openStackServer.Namespace, Name: claimName}, claim) + if err == nil { + return claim, nil + } else if client.IgnoreNotFound(err) != nil { + return nil, err + } + + claim = &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimName, + Namespace: openStackServer.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: openStackServer.APIVersion, + Kind: openStackServer.Kind, + Name: openStackServer.Name, + UID: openStackServer.UID, + }, + }, + Finalizers: []string{infrav1.IPClaimMachineFinalizer}, + }, + Spec: ipamv1.IPAddressClaimSpec{ + PoolRef: *poolRef, + }, + } + + // If the OpenStackServer has a ClusterNameLabel, set it on the IPAddressClaim as well. + // This is useful for garbage collection of IPAddressClaims when a Cluster is deleted. 
+ if openStackServer.ObjectMeta.Labels[clusterv1.ClusterNameLabel] != "" { + claim.ObjectMeta.Labels[clusterv1.ClusterNameLabel] = openStackServer.ObjectMeta.Labels[clusterv1.ClusterNameLabel] + } + + if err := r.Client.Create(ctx, claim); err != nil { + return nil, err + } + + r.Recorder.Eventf(openStackServer, corev1.EventTypeNormal, "CreatingIPAddressClaim", "Creating IPAddressClaim %s/%s", claim.Namespace, claim.Name) + scope.Logger().Info("Created IPAddressClaim", "name", claim.Name) + return claim, nil +} + +func (r *OpenStackServerReconciler) associateIPAddressFromIPAddressClaim(ctx context.Context, openStackServer *infrav1alpha1.OpenStackServer, instanceStatus *compute.InstanceStatus, instanceNS *compute.InstanceNetworkStatus, claim *ipamv1.IPAddressClaim, networkingService *networking.Service) error { + address := &ipamv1.IPAddress{} + addressKey := client.ObjectKey{Namespace: openStackServer.Namespace, Name: claim.Status.AddressRef.Name} + + if err := r.Client.Get(ctx, addressKey, address); err != nil { + return err + } + + instanceAddresses := instanceNS.Addresses() + for _, instanceAddress := range instanceAddresses { + if instanceAddress.Address == address.Spec.Address { + conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition) + return nil + } + } + + fip, err := networkingService.GetFloatingIP(address.Spec.Address) + if err != nil { + return err + } + + if fip == nil { + conditions.MarkFalse(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "floating IP does not exist") + return fmt.Errorf("floating IP %q does not exist", address.Spec.Address) + } + + port, err := networkingService.GetPortForExternalNetwork(instanceStatus.ID(), fip.FloatingNetworkID) + if err != nil { + return fmt.Errorf("get port for floating IP %q: %w", fip.FloatingIP, err) + } + + if port == nil { + conditions.MarkFalse(openStackServer, 
infrav1.FloatingAddressFromPoolReadyCondition, infrav1.FloatingAddressFromPoolErrorReason, clusterv1.ConditionSeverityError, "Can't find port for floating IP %q on external network %s", fip.FloatingIP, fip.FloatingNetworkID) + return fmt.Errorf("port for floating IP %q on network %s does not exist", fip.FloatingIP, fip.FloatingNetworkID) + } + + if err = networkingService.AssociateFloatingIP(openStackServer, fip, port.ID); err != nil { + return err + } + conditions.MarkTrue(openStackServer, infrav1.FloatingAddressFromPoolReadyCondition) + return nil +} + +func (r *OpenStackServerReconciler) reconcileDeleteFloatingAddressFromPool(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) error { + log := scope.Logger().WithValues("openStackMachine", openStackServer.Name) + log.Info("Reconciling Machine delete floating address from pool") + if openStackServer.Spec.FloatingIPPoolRef == nil { + return nil + } + claimName := names.GetFloatingAddressClaimName(openStackServer.Name) + claim := &ipamv1.IPAddressClaim{} + if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: openStackServer.Namespace, Name: claimName}, claim); err != nil { + return client.IgnoreNotFound(err) + } + + controllerutil.RemoveFinalizer(claim, infrav1.IPClaimMachineFinalizer) + return r.Client.Update(context.Background(), claim) +} diff --git a/controllers/openstackserver_controller_test.go b/controllers/openstackserver_controller_test.go new file mode 100644 index 0000000000..adde365964 --- /dev/null +++ b/controllers/openstackserver_controller_test.go @@ -0,0 +1,543 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + "reflect" + "testing" + + "github.com/go-logr/logr/testr" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/portsbinding" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/trunks" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" + . 
"github.com/onsi/gomega" //nolint:revive + "go.uber.org/mock/gomock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" +) + +const ( + openStackServerName = "test-openstack-server" + instanceUUID = "8308882f-5e46-47e6-8e12-1fe869c43d1d" + portUUID = "55eac199-4836-4a98-b31c-9f65f382ad46" + rootVolumeUUID = "4724a66d-bd5e-47f3-bb57-a67fcb4168e0" + trunkUUID = "9d348baa-93b1-4e63-932f-dd0527fbd789" + imageName = "my-image" + defaultFlavor = "m1.small" +) + +type recorders struct { + compute *mock.MockComputeClientMockRecorder + image *mock.MockImageClientMockRecorder + network *mock.MockNetworkClientMockRecorder + volume *mock.MockVolumeClientMockRecorder +} + +var defaultImage = infrav1.ImageParam{ + Filter: &infrav1.ImageFilter{ + Name: ptr.To(imageName), + }, +} + +var defaultPortOpts = []infrav1.PortOpts{ + { + Network: &infrav1.NetworkParam{ + ID: ptr.To(networkUUID), + }, + }, +} + +var defaultResolvedPorts = []infrav1.ResolvedPortSpec{ + { + Name: openStackServerName + "-0", + NetworkID: networkUUID, + }, +} + +var defaultPortsStatus = []infrav1.PortStatus{ + { + ID: portUUID, + }, +} + +var getDefaultFlavor = func(r *recorders) { + f := flavors.Flavor{ + Name: defaultFlavor, + } + r.compute.GetFlavorFromName(defaultFlavor).Return(&f, nil) +} + +var createDefaultPort = func(r *recorders) { + createOpts := ports.CreateOpts{ + Name: openStackServerName + "-0", + NetworkID: networkUUID, + } + portsBuilder := portsbinding.CreateOptsExt{ + CreateOptsBuilder: createOpts, + } + r.network.CreatePort(portsBuilder).Return(&ports.Port{ + ID: portUUID, + }, nil) +} + +var createDefaultServer = func(r *recorders) { + // 
Mock any server creation + r.compute.CreateServer(gomock.Any(), gomock.Any()).Return(&servers.Server{ID: instanceUUID}, nil) +} + +var listDefaultPorts = func(r *recorders) { + r.network.ListPort(ports.ListOpts{ + Name: openStackServerName + "-0", + NetworkID: networkUUID, + }).Return([]ports.Port{ + { + ID: portUUID, + }, + }, nil) +} + +var listDefaultPortsNotFound = func(r *recorders) { + r.network.ListPort(ports.ListOpts{ + Name: openStackServerName + "-0", + NetworkID: networkUUID, + }).Return(nil, nil) +} + +var listDefaultServerNotFound = func(r *recorders) { + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return([]servers.Server{}, nil) +} + +var listDefaultServerFound = func(r *recorders) { + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return([]servers.Server{{ID: instanceUUID}}, nil) +} + +var deleteDefaultPorts = func(r *recorders) { + trunkExtension := extensions.Extension{} + trunkExtension.Alias = "trunk" + r.network.ListExtensions().Return([]extensions.Extension{trunkExtension}, nil) + r.network.ListTrunk(trunks.ListOpts{PortID: portUUID}).Return([]trunks.Trunk{{ID: trunkUUID}}, nil) + r.network.ListTrunkSubports(trunkUUID).Return([]trunks.Subport{}, nil) + r.network.DeleteTrunk(trunkUUID).Return(nil) + r.network.DeletePort(portUUID).Return(nil) +} + +var deleteServerByID = func(r *recorders) { + r.compute.GetServer(instanceUUID).Return(&servers.Server{ID: instanceUUID, Name: openStackServerName}, nil) + r.compute.DeleteServer(instanceUUID).Return(nil) + r.compute.GetServer(instanceUUID).Return(nil, gophercloud.ErrUnexpectedResponseCode{Actual: 404}) +} + +var deleteServerByName = func(r *recorders) { + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return([]servers.Server{{ID: instanceUUID, Name: openStackServerName}}, nil) + r.compute.DeleteServer(instanceUUID).Return(nil) + r.compute.GetServer(instanceUUID).Return(nil, 
gophercloud.ErrUnexpectedResponseCode{Actual: 404}) +} + +var deleteMissingServerByName = func(r *recorders) { + // Lookup server by name because it is not in status. + // Don't find it. + r.compute.ListServers(servers.ListOpts{ + Name: "^" + openStackServerName + "$", + }).Return(nil, nil) +} + +var deleteRootVolume = func(r *recorders) { + // Fetch volume by name + volumeName := fmt.Sprintf("%s-root", openStackServerName) + r.volume.ListVolumes(volumes.ListOpts{ + AllTenants: false, + Name: volumeName, + TenantID: "", + }).Return([]volumes.Volume{{ + ID: rootVolumeUUID, + Name: volumeName, + }}, nil) + + // Delete volume + r.volume.DeleteVolume(rootVolumeUUID, volumes.DeleteOpts{}).Return(nil) +} + +func TestOpenStackServer_serverToInstanceSpec(t *testing.T) { + tests := []struct { + name string + openStackServer *infrav1alpha1.OpenStackServer + want *compute.InstanceSpec + wantErr bool + }{ + { + name: "Test serverToInstanceSpec without resolved resources", + openStackServer: &infrav1alpha1.OpenStackServer{}, + wantErr: true, + }, + { + name: "Test serverToInstanceSpec with resolved resources", + openStackServer: &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: infrav1alpha1.OpenStackServerSpec{ + AdditionalBlockDevices: []infrav1.AdditionalBlockDevice{ + { + Name: "block-device", + SizeGiB: 10, + Storage: infrav1.BlockDeviceStorage{ + Type: "ceph", + }, + }, + }, + AvailabilityZone: ptr.To("failure-domain"), + ConfigDrive: ptr.To(true), + Flavor: "large", + RootVolume: &infrav1.RootVolume{ + SizeGiB: 10, + BlockDeviceVolume: infrav1.BlockDeviceVolume{ + Type: "fast", + }, + }, + ServerMetadata: []infrav1.ServerMetadata{{Key: "key", Value: "value"}}, + SSHKeyName: "key", + Tags: []string{"tag1", "tag2"}, + Trunk: ptr.To(true), + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: "123", + ServerGroupID: "456", + }, + }, + }, + want: &compute.InstanceSpec{ + 
AdditionalBlockDevices: []infrav1.AdditionalBlockDevice{ + { + Name: "block-device", + SizeGiB: 10, + Storage: infrav1.BlockDeviceStorage{ + Type: "ceph", + }, + }, + }, + ConfigDrive: true, + FailureDomain: "failure-domain", + Flavor: "large", + ImageID: "123", + Metadata: map[string]string{ + "key": "value", + }, + Name: "test", + RootVolume: &infrav1.RootVolume{ + SizeGiB: 10, + BlockDeviceVolume: infrav1.BlockDeviceVolume{ + Type: "fast", + }, + }, + ServerGroupID: "456", + SSHKeyName: "key", + Tags: []string{"tag1", "tag2"}, + Trunk: true, + }, + }, + } + for i := range tests { + tt := tests[i] + t.Run(tt.name, func(t *testing.T) { + reconciler := OpenStackServerReconciler{} + spec, err := reconciler.serverToInstanceSpec(ctx, tt.openStackServer) + if (err != nil) != tt.wantErr { + t.Fatalf("serverToInstanceSpec() error = %+v, wantErr %+v", err, tt.wantErr) + } + if err == nil && !reflect.DeepEqual(spec, tt.want) { + t.Errorf("serverToInstanceSpec() got = %+v, want %+v", spec, tt.want) + } + }) + } +} + +func Test_OpenStackServerReconcileDelete(t *testing.T) { + tests := []struct { + name string + osServer infrav1alpha1.OpenStackServer + expect func(r *recorders) + wantErr bool + wantRemoveFinalizer bool + }{ + { + name: "No volumes, resolved and resources populated", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: defaultFlavor, + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + InstanceID: ptr.To(instanceUUID), + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + deleteServerByID(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "Root volume, resolved and resources populated", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + 
Image: defaultImage, + RootVolume: &infrav1.RootVolume{ + SizeGiB: 50, + }, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + InstanceID: ptr.To(instanceUUID), + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + // Server exists, so we don't delete root volume explicitly + deleteServerByID(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "Root volume, server not created, resolved and resources populated", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Image: defaultImage, + RootVolume: &infrav1.RootVolume{ + SizeGiB: 50, + }, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + deleteMissingServerByName(r) + deleteRootVolume(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "No instance id, server and ports exist", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + Resources: &infrav1alpha1.ServerResources{ + Ports: defaultPortsStatus, + }, + }, + }, + expect: func(r *recorders) { + deleteServerByName(r) + deleteDefaultPorts(r) + }, + wantRemoveFinalizer: true, + }, + { + name: "Adopt ports error should fail deletion and retry", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: 
infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + }, + }, + expect: func(r *recorders) { + r.network.ListPort(ports.ListOpts{ + NetworkID: networkUUID, + Name: openStackServerName + "-0", + }).Return(nil, fmt.Errorf("error adopting ports")) + }, + wantErr: true, + wantRemoveFinalizer: false, + }, + } + for i := range tests { + tt := &tests[i] + t.Run(tt.name, func(t *testing.T) { + g := NewGomegaWithT(t) + log := testr.New(t) + + mockCtrl := gomock.NewController(t) + mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "") + + reconciler := OpenStackServerReconciler{} + + computeRecorder := mockScopeFactory.ComputeClient.EXPECT() + imageRecorder := mockScopeFactory.ImageClient.EXPECT() + networkRecorder := mockScopeFactory.NetworkClient.EXPECT() + volumeRecorder := mockScopeFactory.VolumeClient.EXPECT() + + if tt.expect != nil { + tt.expect(&recorders{computeRecorder, imageRecorder, networkRecorder, volumeRecorder}) + } + scopeWithLogger := scope.NewWithLogger(mockScopeFactory, log) + + osServer := &tt.osServer + osServer.Name = openStackServerName + osServer.Finalizers = []string{infrav1alpha1.OpenStackServerFinalizer} + + err := reconciler.reconcileDelete(scopeWithLogger, &tt.osServer) + + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + if tt.wantRemoveFinalizer { + g.Expect(osServer.Finalizers).To(BeEmpty()) + } else { + g.Expect(osServer.Finalizers).To(ConsistOf(infrav1alpha1.OpenStackServerFinalizer)) + } + }) + } +} + +func Test_OpenStackServerReconcileCreate(t *testing.T) { + tests := []struct { + name string + osServer infrav1alpha1.OpenStackServer + expect func(r *recorders) + }{ + { + name: "Minimal server spec creating port and server", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: defaultFlavor, + Image: defaultImage, + Ports: defaultPortOpts, + }, + 
Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + }, + }, + expect: func(r *recorders) { + listDefaultPortsNotFound(r) + createDefaultPort(r) + getDefaultFlavor(r) + listDefaultServerNotFound(r) + createDefaultServer(r) + }, + }, + { + name: "Minimum server spec adopting port and server", + osServer: infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: defaultFlavor, + Image: defaultImage, + Ports: defaultPortOpts, + }, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: &infrav1alpha1.ResolvedServerSpec{ + ImageID: imageUUID, + Ports: defaultResolvedPorts, + }, + }, + }, + expect: func(r *recorders) { + listDefaultPorts(r) + listDefaultServerFound(r) + }, + }, + } + for i := range tests { + tt := &tests[i] + t.Run(tt.name, func(t *testing.T) { + g := NewGomegaWithT(t) + log := testr.New(t) + + mockCtrl := gomock.NewController(t) + mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "") + + reconciler := OpenStackServerReconciler{} + + computeRecorder := mockScopeFactory.ComputeClient.EXPECT() + imageRecorder := mockScopeFactory.ImageClient.EXPECT() + networkRecorder := mockScopeFactory.NetworkClient.EXPECT() + volumeRecorder := mockScopeFactory.VolumeClient.EXPECT() + + if tt.expect != nil { + tt.expect(&recorders{computeRecorder, imageRecorder, networkRecorder, volumeRecorder}) + } + scopeWithLogger := scope.NewWithLogger(mockScopeFactory, log) + + osServer := &tt.osServer + osServer.Name = openStackServerName + osServer.Finalizers = []string{infrav1alpha1.OpenStackServerFinalizer} + + _, err := reconciler.reconcileNormal(ctx, scopeWithLogger, &tt.osServer) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 26d9c9a123..c67d16916f 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -18,29 +18,23 @@ package controllers import ( 
"context" - "errors" "path/filepath" "testing" - "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" . "github.com/onsi/ginkgo/v2" //nolint:revive . "github.com/onsi/gomega" //nolint:revive - "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" "sigs.k8s.io/cluster-api-provider-openstack/test/helpers/external" ) @@ -81,6 +75,9 @@ var _ = BeforeSuite(func() { err = infrav1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = infrav1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + framework.TryAddDefaultSchemes(scheme.Scheme) // +kubebuilder:scaffold:scheme @@ -124,61 +121,3 @@ var _ = Describe("EnvTest sanity check", func() { // will actually stay in "Terminating" state and never be completely gone. 
}) }) - -var _ = Describe("When calling getOrCreate", func() { - logger := GinkgoLogr - - var ( - reconsiler OpenStackMachineReconciler - mockCtrl *gomock.Controller - mockScopeFactory *scope.MockScopeFactory - computeService *compute.Service - err error - ) - - BeforeEach(func() { - ctx = context.Background() - reconsiler = OpenStackMachineReconciler{} - mockCtrl = gomock.NewController(GinkgoT()) - mockScopeFactory = scope.NewMockScopeFactory(mockCtrl, "1234") - computeService, err = compute.NewService(scope.NewWithLogger(mockScopeFactory, logger)) - Expect(err).NotTo(HaveOccurred()) - }) - - It("should return an error if unable to get instance", func() { - openStackCluster := &infrav1.OpenStackCluster{} - machine := &clusterv1.Machine{} - openStackMachine := &infrav1.OpenStackMachine{ - Status: infrav1.OpenStackMachineStatus{ - InstanceID: ptr.To("machine-uuid"), - }, - } - - mockScopeFactory.ComputeClient.EXPECT().GetServer(gomock.Any()).Return(nil, errors.New("Test error when getting server")) - instanceStatus, err := reconsiler.getOrCreateInstance(logger, openStackCluster, machine, openStackMachine, computeService, "", []string{}) - Expect(err).To(HaveOccurred()) - Expect(instanceStatus).To(BeNil()) - conditions := openStackMachine.GetConditions() - Expect(len(conditions) > 0).To(BeTrue()) - for i := range conditions { - if conditions[i].Type == infrav1.InstanceReadyCondition { - Expect(conditions[i].Reason).To(Equal(infrav1.OpenStackErrorReason)) - break - } - } - }) - - It("should retrieve instance by name if no ID is stored", func() { - openStackCluster := &infrav1.OpenStackCluster{} - machine := &clusterv1.Machine{} - openStackMachine := &infrav1.OpenStackMachine{} - servers := make([]servers.Server, 1) - servers[0].ID = "machine-uuid" - - mockScopeFactory.ComputeClient.EXPECT().ListServers(gomock.Any()).Return(servers, nil) - instanceStatus, err := reconsiler.getOrCreateInstance(logger, openStackCluster, machine, openStackMachine, computeService, "", 
[]string{}) - Expect(err).ToNot(HaveOccurred()) - Expect(instanceStatus).ToNot(BeNil()) - Expect(instanceStatus.ID()).To(Equal("machine-uuid")) - }) -}) diff --git a/docs/book/src/api/v1alpha1/api.md b/docs/book/src/api/v1alpha1/api.md index fa456a74f9..3e7e1e0047 100644 --- a/docs/book/src/api/v1alpha1/api.md +++ b/docs/book/src/api/v1alpha1/api.md @@ -3,7 +3,287 @@

package v1alpha1 contains API Schema definitions for the infrastructure v1alpha1 API group

Resource Types: - + +

OpenStackServer +

+

+

OpenStackServer is the Schema for the openstackservers API.

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+ +infrastructure.cluster.x-k8s.io/v1alpha1 + +
+kind
+string +
OpenStackServer
+metadata
+ +Kubernetes meta/v1.ObjectMeta + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +OpenStackServerSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+additionalBlockDevices
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.AdditionalBlockDevice + + +
+(Optional) +

AdditionalBlockDevices is a list of specifications for additional block devices to attach to the server instance.

+
+availabilityZone
+ +string + +
+(Optional) +

AvailabilityZone is the availability zone in which to create the server instance.

+
+configDrive
+ +bool + +
+(Optional) +

ConfigDrive is a flag to enable config drive for the server instance.

+
+flavor
+ +string + +
+

The flavor reference for the flavor for the server instance.

+
+floatingIPPoolRef
+ +Kubernetes core/v1.TypedLocalObjectReference + +
+(Optional) +

FloatingIPPoolRef is a reference to a FloatingIPPool to allocate a floating IP from.

+
+identityRef
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.OpenStackIdentityReference + + +
+

IdentityRef is a reference to a secret holding OpenStack credentials.

+
+image
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ImageParam + + +
+

The image to use for the server instance.

+
+ports
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortOpts + + +
+

Ports to be attached to the server instance.

+
+rootVolume
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.RootVolume + + +
+(Optional) +

RootVolume is the specification for the root volume of the server instance.

+
+sshKeyName
+ +string + +
+

SSHKeyName is the name of the SSH key to inject in the instance.

+
+securityGroups
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.SecurityGroupParam + + +
+(Optional) +

SecurityGroups is a list of security groups names to assign to the instance.

+
+serverGroup
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerGroupParam + + +
+(Optional) +

ServerGroup is the server group to which the server instance belongs.

+
+serverMetadata
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerMetadata + + +
+(Optional) +

ServerMetadata is a map of key value pairs to add to the server instance.

+
+tags
+ +[]string + +
+

Tags which will be added to the machine and all dependent resources +which support them. These are in addition to Tags defined on the +cluster. +Requires Nova api 2.52 minimum!

+
+trunk
+ +bool + +
+(Optional) +

Trunk is a flag to indicate if the server instance is created on a trunk port or not.

+
+userDataRef
+ +Kubernetes core/v1.LocalObjectReference + +
+(Optional) +

UserDataRef is a reference to a secret containing the user data to +be injected into the server instance.

+
+
+status
+ + +OpenStackServerStatus + + +
+

OpenStackFloatingIPPool

@@ -287,28 +567,469 @@ sigs.k8s.io/cluster-api/api/v1beta1.Conditions -

ReclaimPolicy -(string alias)

+

OpenStackServerSpec +

(Appears on: -OpenStackFloatingIPPoolSpec) +OpenStackServer)

-

ReclaimPolicy is a string type alias to represent reclaim policies for floating ips.

+

OpenStackServerSpec defines the desired state of OpenStackServer.

- + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ValueField Description

"Delete"

ReclaimDelete is the reclaim policy for floating ips.

+
+additionalBlockDevices
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.AdditionalBlockDevice + +

"Retain"

ReclaimRetain is the reclaim policy for floating ips.

+
+(Optional) +

AdditionalBlockDevices is a list of specifications for additional block devices to attach to the server instance.

+
+availabilityZone
+ +string + +
+(Optional) +

AvailabilityZone is the availability zone in which to create the server instance.

+
+configDrive
+ +bool + +
+(Optional) +

ConfigDrive is a flag to enable config drive for the server instance.

+
+flavor
+ +string + +
+

The flavor reference for the flavor for the server instance.

+floatingIPPoolRef
+ +Kubernetes core/v1.TypedLocalObjectReference + +
+(Optional) +

FloatingIPPoolRef is a reference to a FloatingIPPool to allocate a floating IP from.

+
+identityRef
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.OpenStackIdentityReference + + +
+

IdentityRef is a reference to a secret holding OpenStack credentials.

+
+image
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ImageParam + + +
+

The image to use for the server instance.

+
+ports
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortOpts + + +
+

Ports to be attached to the server instance.

+
+rootVolume
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.RootVolume + + +
+(Optional) +

RootVolume is the specification for the root volume of the server instance.

+
+sshKeyName
+ +string + +
+

SSHKeyName is the name of the SSH key to inject in the instance.

+
+securityGroups
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.SecurityGroupParam + + +
+(Optional) +

SecurityGroups is a list of security groups names to assign to the instance.

+
+serverGroup
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerGroupParam + + +
+(Optional) +

ServerGroup is the server group to which the server instance belongs.

+
+serverMetadata
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerMetadata + + +
+(Optional) +

ServerMetadata is a map of key value pairs to add to the server instance.

+
+tags
+ +[]string + +
+

Tags which will be added to the machine and all dependent resources +which support them. These are in addition to Tags defined on the +cluster. +Requires Nova api 2.52 minimum!

+
+trunk
+ +bool + +
+(Optional) +

Trunk is a flag to indicate if the server instance is created on a trunk port or not.

+
+userDataRef
+ +Kubernetes core/v1.LocalObjectReference + +
+(Optional) +

UserDataRef is a reference to a secret containing the user data to +be injected into the server instance.

+
+

OpenStackServerStatus +

+

+(Appears on: +OpenStackServer) +

+

+

OpenStackServerStatus defines the observed state of OpenStackServer.

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+ready
+ +bool + +
+

Ready is true when the OpenStack server is ready.

+
+instanceID
+ +string + +
+(Optional) +

InstanceID is the ID of the server instance.

+
+instanceState
+ + +sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.InstanceState + + +
+(Optional) +

InstanceState is the state of the server instance.

+
+addresses
+ +[]Kubernetes core/v1.NodeAddress + +
+(Optional) +

Addresses is the list of addresses of the server instance.

+
+resolved
+ + +ResolvedServerSpec + + +
+(Optional) +

Resolved contains parts of the machine spec with all external +references fully resolved.

+
+resources
+ + +ServerResources + + +
+(Optional) +

Resources contains references to OpenStack resources created for the machine.

+
+conditions
+ + +sigs.k8s.io/cluster-api/api/v1beta1.Conditions + + +
+(Optional) +

Conditions defines current service state of the OpenStackServer.

+
+

ReclaimPolicy +(string alias)

+

+(Appears on: +OpenStackFloatingIPPoolSpec) +

+

+

ReclaimPolicy is a string type alias to represent reclaim policies for floating ips.

+

+ + + + + + + + + + + + +
ValueDescription

"Delete"

ReclaimDelete is the reclaim policy for floating ips.

+

"Retain"

ReclaimRetain is the reclaim policy for floating ips.

+
+

ResolvedServerSpec +

+

+(Appears on: +OpenStackServerStatus) +

+

+

ResolvedServerSpec contains resolved references to resources required by the server.

+

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+serverGroupID
+ +string + +
+(Optional) +

ServerGroupID is the ID of the server group the server should be added to and is calculated based on ServerGroupFilter.

+
+imageID
+ +string + +
+(Optional) +

ImageID is the ID of the image to use for the server and is calculated based on ImageFilter.

+
+ports
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ResolvedPortSpec + + +
+(Optional) +

Ports is the fully resolved list of ports to create for the server.

+
+

ServerResources +

+

+(Appears on: +OpenStackServerStatus) +

+

+

ServerResources contains references to OpenStack resources created for the server.

+

+ + + + + + + + + + + + + +
FieldDescription
+ports
+ + +[]sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortStatus + + +
+(Optional) +

Ports is the status of the ports created for the server.

+
+

ServerStatusError +(string alias)

+

+

+ + + + + + + + +
ValueDescription

"CreateError"


diff --git a/docs/book/src/clusteropenstack/configuration.md b/docs/book/src/clusteropenstack/configuration.md index 7dd23be669..d978eb5439 100644 --- a/docs/book/src/clusteropenstack/configuration.md +++ b/docs/book/src/clusteropenstack/configuration.md @@ -720,11 +720,8 @@ If `managedSecurityGroups` is set to a non-nil value (e.g. `{}`), security group ### Making changes to the bastion host -Changes can be made to the bastion instance, like for example changing the flavor. -First, you have to disable the bastion host by setting `enabled: false` in the `OpenStackCluster.Spec.Bastion` field. -The bastion will be deleted, you can check the status of the bastion host by running `kubectl get openstackcluster` and looking at the `Bastion` field in status. -Once it's gone, you can re-enable the bastion host by setting `enabled: true` and then making changes to the bastion instance spec by modifying the `OpenStackCluster.Spec.Bastion.Instance` field. -The bastion host will be re-created with the new instance spec. +Changes can be made to the bastion spec, like for example changing the flavor, by modifying the `OpenStackCluster.Spec.Bastion.Spec` field. +The bastion host will be re-created with the new spec. ### Disabling the bastion diff --git a/docs/book/src/development/development.md b/docs/book/src/development/development.md index 661ac84ec6..06a8b560e1 100644 --- a/docs/book/src/development/development.md +++ b/docs/book/src/development/development.md @@ -503,3 +503,17 @@ This sections goal is to gather various insights into the API design that can se Starting from v1beta1 both `OpenStackMachineStatus` and `BastionsStatus` feature a field named `referencedResources` which aims to include fields that list individual IDs of the resources associated with the machine or bastion. These IDs are calculated on machine or bastion creation and are not intended to be changed during the object lifecycle. 
Having all the IDs of related resources saved in the statuses allows CAPO to make easy decisions about deleting the related resources when deleting the VM corresponding to the machine or bastion. + +### `OpenStackServer` + +`OpenStackServer` is a new resource introduced as v1alpha1. It is a representation of a server in OpenStack. It is used to manage the lifecycle of the server and to store the server's status. +Both the bastion and the machine are represented by an `OpenStackServer` object. +Even if it's technically possible for an user to create an `OpenStackServer` object directly, it is not supported for now as there is no use case for it. + +To get the list of `OpenStackServer` objects, the user can use the following command: + +```shell +kubectl get openstackservers +``` + +This object is immutable and is created by the controller when a machine or a bastion is created. The `OpenStackServer` object is deleted when the machine or the bastion is deleted. diff --git a/hack/codegen/openapi/zz_generated.openapi.go b/hack/codegen/openapi/zz_generated.openapi.go index 1b56409789..617b8107ce 100644 --- a/hack/codegen/openapi/zz_generated.openapi.go +++ b/hack/codegen/openapi/zz_generated.openapi.go @@ -306,6 +306,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackFloatingIPPoolList": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackFloatingIPPoolList(ref), "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackFloatingIPPoolSpec": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackFloatingIPPoolSpec(ref), "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackFloatingIPPoolStatus": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackFloatingIPPoolStatus(ref), + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServer": 
schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServer(ref), + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServerList": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServerList(ref), + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServerSpec": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServerSpec(ref), + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServerStatus": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServerStatus(ref), + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.ResolvedServerSpec": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_ResolvedServerSpec(ref), + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.ServerResources": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_ServerResources(ref), "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6.APIServerLoadBalancer": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha6_APIServerLoadBalancer(ref), "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6.AddressPair": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha6_AddressPair(ref), "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6.Bastion": schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha6_Bastion(ref), @@ -15765,6 +15771,435 @@ func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackFloat } } +func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServer(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OpenStackServer is the Schema for the openstackservers API.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object 
represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServerSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServerStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServerSpec", "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServerStatus"}, + } +} + +func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServerList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OpenStackServerList contains a list of OpenStackServer.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServer"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.OpenStackServer"}, + } +} + +func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OpenStackServerSpec defines the desired state of OpenStackServer.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "additionalBlockDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "name", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: 
"AdditionalBlockDevices is a list of specifications for additional block devices to attach to the server instance.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.AdditionalBlockDevice"), + }, + }, + }, + }, + }, + "availabilityZone": { + SchemaProps: spec.SchemaProps{ + Description: "AvailabilityZone is the availability zone in which to create the server instance.", + Type: []string{"string"}, + Format: "", + }, + }, + "configDrive": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigDrive is a flag to enable config drive for the server instance.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "flavor": { + SchemaProps: spec.SchemaProps{ + Description: "The flavor reference for the flavor for the server instance.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "floatingIPPoolRef": { + SchemaProps: spec.SchemaProps{ + Description: "FloatingIPPoolRef is a reference to a FloatingIPPool to allocate a floating IP from.", + Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"), + }, + }, + "identityRef": { + SchemaProps: spec.SchemaProps{ + Description: "IdentityRef is a reference to a secret holding OpenStack credentials.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.OpenStackIdentityReference"), + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "The image to use for the server instance.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ImageParam"), + }, + }, + "ports": { + SchemaProps: spec.SchemaProps{ + Description: "Ports to be attached to the server instance.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: 
ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortOpts"), + }, + }, + }, + }, + }, + "rootVolume": { + SchemaProps: spec.SchemaProps{ + Description: "RootVolume is the specification for the root volume of the server instance.", + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.RootVolume"), + }, + }, + "sshKeyName": { + SchemaProps: spec.SchemaProps{ + Description: "SSHKeyName is the name of the SSH key to inject in the instance.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "securityGroups": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityGroups is a list of security groups names to assign to the instance.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.SecurityGroupParam"), + }, + }, + }, + }, + }, + "serverGroup": { + SchemaProps: spec.SchemaProps{ + Description: "ServerGroup is the server group to which the server instance belongs.", + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerGroupParam"), + }, + }, + "serverMetadata": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "key", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ServerMetadata is a map of key value pairs to add to the server instance.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerMetadata"), + }, + }, + }, + }, + }, + "tags": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "set", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Tags which will be added to the machine and 
all dependent resources which support them. These are in addition to Tags defined on the cluster. Requires Nova api 2.52 minimum!", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "trunk": { + SchemaProps: spec.SchemaProps{ + Description: "Trunk is a flag to indicate if the server instance is created on a trunk port or not.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "userDataRef": { + SchemaProps: spec.SchemaProps{ + Description: "UserDataRef is a reference to a secret containing the user data to be injected into the server instance.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + Required: []string{"flavor", "identityRef", "image", "ports", "sshKeyName"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.TypedLocalObjectReference", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.AdditionalBlockDevice", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ImageParam", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.OpenStackIdentityReference", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortOpts", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.RootVolume", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.SecurityGroupParam", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerGroupParam", "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ServerMetadata"}, + } +} + +func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_OpenStackServerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OpenStackServerStatus defines the observed state of OpenStackServer.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ready": { + 
SchemaProps: spec.SchemaProps{ + Description: "Ready is true when the OpenStack server is ready.", + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "instanceID": { + SchemaProps: spec.SchemaProps{ + Description: "InstanceID is the ID of the server instance.", + Type: []string{"string"}, + Format: "", + }, + }, + "instanceState": { + SchemaProps: spec.SchemaProps{ + Description: "InstanceState is the state of the server instance.", + Type: []string{"string"}, + Format: "", + }, + }, + "addresses": { + SchemaProps: spec.SchemaProps{ + Description: "Addresses is the list of addresses of the server instance.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.NodeAddress"), + }, + }, + }, + }, + }, + "resolved": { + SchemaProps: spec.SchemaProps{ + Description: "Resolved contains parts of the machine spec with all external references fully resolved.", + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.ResolvedServerSpec"), + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Resources contains references to OpenStack resources created for the machine.", + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.ServerResources"), + }, + }, + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "Conditions defines current service state of the OpenStackServer.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.Condition"), + }, + }, + }, + }, + }, + }, + Required: []string{"ready"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeAddress", "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.ResolvedServerSpec", "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1.ServerResources", 
"sigs.k8s.io/cluster-api/api/v1beta1.Condition"}, + } +} + +func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_ResolvedServerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolvedServerSpec contains resolved references to resources required by the server.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "serverGroupID": { + SchemaProps: spec.SchemaProps{ + Description: "ServerGroupID is the ID of the server group the server should be added to and is calculated based on ServerGroupFilter.", + Type: []string{"string"}, + Format: "", + }, + }, + "imageID": { + SchemaProps: spec.SchemaProps{ + Description: "ImageID is the ID of the image to use for the server and is calculated based on ImageFilter.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + SchemaProps: spec.SchemaProps{ + Description: "Ports is the fully resolved list of ports to create for the server.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ResolvedPortSpec"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.ResolvedPortSpec"}, + } +} + +func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha1_ServerResources(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServerResources contains references to OpenStack resources created for the server.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ports": { + SchemaProps: spec.SchemaProps{ + Description: "Ports is the status of the ports created for the server.", + Type: []string{"array"}, + Items: 
&spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortStatus"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1.PortStatus"}, + } +} + func schema_sigsk8sio_cluster_api_provider_openstack_api_v1alpha6_APIServerLoadBalancer(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/main.go b/main.go index 76329967b3..27248957d3 100644 --- a/main.go +++ b/main.go @@ -350,6 +350,17 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, caCerts []byte, sco setupLog.Error(err, "unable to create controller", "controller", "FloatingIPPool") os.Exit(1) } + if err := (&controllers.OpenStackServerReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("openstackserver-controller"), + WatchFilterValue: watchFilterValue, + ScopeFactory: scopeFactory, + CaCertificates: caCerts, + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OpenStackServer") + os.Exit(1) + } } func setupWebhooks(mgr ctrl.Manager) { diff --git a/pkg/cloud/services/compute/referenced_resources.go b/pkg/cloud/services/compute/referenced_resources.go index 26e321c391..97d3fbfa2a 100644 --- a/pkg/cloud/services/compute/referenced_resources.go +++ b/pkg/cloud/services/compute/referenced_resources.go @@ -20,20 +20,36 @@ import ( "fmt" "slices" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) -// ResolveMachineSpec is responsible for populating a 
ResolvedMachineSpec from +// ResolveServerSpec is responsible for populating a ResolvedServerSpec from // an OpenStackMachineSpec and any external dependencies. The result contains no // external dependencies, and does not require any complex logic on creation. -// Note that we only set the fields in ResolvedMachineSpec that are not set yet. This is ok because: -// - OpenStackMachine is immutable, so we can't change the spec after the machine is created. -// - the bastion is mutable, but we delete the bastion when the spec changes, so the bastion status will be empty. -func ResolveMachineSpec(scope *scope.WithLogger, spec *infrav1.OpenStackMachineSpec, resolved *infrav1.ResolvedMachineSpec, clusterResourceName, baseName string, openStackCluster *infrav1.OpenStackCluster, managedSecurityGroup *string) (changed bool, err error) { +// Note that we only set the fields in ResolvedServerSpec that are not set yet. This is ok because +// OpenStackServer is immutable, so we can't change the spec after the machine is created. +func ResolveServerSpec(scope *scope.WithLogger, openStackServer *infrav1alpha1.OpenStackServer) (changed bool, err error) { changed = false + spec := &openStackServer.Spec + resolved := openStackServer.Status.Resolved + if resolved == nil { + resolved = &infrav1alpha1.ResolvedServerSpec{} + openStackServer.Status.Resolved = resolved + } + + // If the server is bound to a cluster, we use the cluster name to generate the port description. + var clusterName string + if openStackServer.ObjectMeta.Labels[clusterv1.ClusterNameLabel] != "" { + clusterName = openStackServer.ObjectMeta.Labels[clusterv1.ClusterNameLabel] + } + computeService, err := NewService(scope) if err != nil { return changed, err @@ -64,20 +80,25 @@ func ResolveMachineSpec(scope *scope.WithLogger, spec *infrav1.OpenStackMachineS changed = true } - // ConstructPorts requires the cluster network to have been set. 
We only - // call this from places where we know it should have been set, but the - // cluster status is externally-provided data so we check it anyway. - if openStackCluster.Status.Network == nil { - return changed, fmt.Errorf("called ResolveMachineSpec with nil OpenStackCluster.Status.Network") - } + specTrunk := ptr.Deref(spec.Trunk, false) // Network resources are required in order to get ports options. + // Notes: + // - clusterResourceName is not used in this context, so we pass an empty string. In the future, + // we may want to remove that (it's only used for the port description) or allow a user to pass + // a custom description. + // - managedSecurityGroup is not used in this context, so we pass nil. The security groups are + // passed in the spec.SecurityGroups and spec.Ports. + // - We run a safety check to ensure that the resolved.Ports has the same length as the spec.Ports. + // This is to ensure that we don't accidentally add ports to the resolved.Ports that are not in the spec. 
if len(resolved.Ports) == 0 { - defaultNetwork := openStackCluster.Status.Network - portsOpts, err := networkingService.ConstructPorts(spec.Ports, spec.SecurityGroups, spec.Trunk, clusterResourceName, baseName, defaultNetwork, managedSecurityGroup, InstanceTags(spec, openStackCluster)) + portsOpts, err := networkingService.ConstructPorts(spec.Ports, spec.SecurityGroups, specTrunk, clusterName, openStackServer.Name, nil, nil, spec.Tags) if err != nil { return changed, err } + if portsOpts != nil && len(portsOpts) != len(spec.Ports) { + return changed, fmt.Errorf("resolved.Ports has a different length than spec.Ports") + } resolved.Ports = portsOpts changed = true } diff --git a/pkg/cloud/services/compute/referenced_resources_test.go b/pkg/cloud/services/compute/referenced_resources_test.go index f885214596..12ff5df6a5 100644 --- a/pkg/cloud/services/compute/referenced_resources_test.go +++ b/pkg/cloud/services/compute/referenced_resources_test.go @@ -18,6 +18,7 @@ package compute import ( "reflect" + "slices" "testing" "github.com/go-logr/logr/testr" @@ -26,14 +27,17 @@ import ( "github.com/gophercloud/gophercloud/v2/openstack/image/v2/images" . 
"github.com/onsi/gomega" //nolint:revive "go.uber.org/mock/gomock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) -func Test_ResolveMachineSpec(t *testing.T) { +func Test_ResolveServerSpec(t *testing.T) { const ( serverGroupID1 = "ce96e584-7ebc-46d6-9e55-987d72e3806c" imageID1 = "de96e584-7ebc-46d6-9e55-987d72e3806c" @@ -42,112 +46,85 @@ func Test_ResolveMachineSpec(t *testing.T) { subnetID = "32dc0e7f-34b6-4544-a69b-248955618736" ) - defaultPorts := []infrav1.ResolvedPortSpec{ + defaultPortSpec := []infrav1.ResolvedPortSpec{ { Name: "test-instance-0", Description: "Created by cluster-api-provider-openstack cluster test-cluster", NetworkID: networkID1, - FixedIPs: []infrav1.ResolvedFixedIP{ - {SubnetID: ptr.To(subnetID)}, + }, + } + + defaultPortOpts := []infrav1.PortOpts{ + { + Network: &infrav1.NetworkParam{ + ID: ptr.To(networkID1), }, }, } tests := []struct { testName string - spec infrav1.OpenStackMachineSpec + spec infrav1alpha1.OpenStackServerSpec managedSecurityGroup *string expectComputeMock func(m *mock.MockComputeClientMockRecorder) expectImageMock func(m *mock.MockImageClientMockRecorder) expectNetworkMock func(m *mock.MockNetworkClientMockRecorder) - before *infrav1.ResolvedMachineSpec - want *infrav1.ResolvedMachineSpec + before *infrav1alpha1.ResolvedServerSpec + want *infrav1alpha1.ResolvedServerSpec wantErr bool }{ { testName: "Resources ID passed", - spec: infrav1.OpenStackMachineSpec{ + spec: infrav1alpha1.OpenStackServerSpec{ ServerGroup: &infrav1.ServerGroupParam{ID: ptr.To(serverGroupID1)}, Image: infrav1.ImageParam{ID: ptr.To(imageID1)}, + Ports: defaultPortOpts, }, - want: &infrav1.ResolvedMachineSpec{ + want: 
&infrav1alpha1.ResolvedServerSpec{ ImageID: imageID1, ServerGroupID: serverGroupID1, - Ports: defaultPorts, - }, - }, - { - testName: "Only image ID passed: want image id and default ports", - spec: infrav1.OpenStackMachineSpec{ - Image: infrav1.ImageParam{ID: ptr.To(imageID1)}, - }, - want: &infrav1.ResolvedMachineSpec{ - ImageID: imageID1, - Ports: defaultPorts, - }, - }, - { - testName: "Server group empty", - spec: infrav1.OpenStackMachineSpec{ - Image: infrav1.ImageParam{ID: ptr.To(imageID1)}, - ServerGroup: nil, - }, - want: &infrav1.ResolvedMachineSpec{ - ImageID: imageID1, - Ports: defaultPorts, + Ports: defaultPortSpec, }, }, { testName: "Server group by Name not found", - spec: infrav1.OpenStackMachineSpec{ - Image: infrav1.ImageParam{ID: ptr.To(imageID1)}, + spec: infrav1alpha1.OpenStackServerSpec{ ServerGroup: &infrav1.ServerGroupParam{Filter: &infrav1.ServerGroupFilter{Name: ptr.To("test-server-group")}}, + Image: infrav1.ImageParam{ID: ptr.To(imageID1)}, + Ports: defaultPortOpts, }, + want: &infrav1alpha1.ResolvedServerSpec{}, expectComputeMock: func(m *mock.MockComputeClientMockRecorder) { m.ListServerGroups().Return( []servergroups.ServerGroup{}, nil) }, - want: &infrav1.ResolvedMachineSpec{}, wantErr: true, }, { testName: "Image by Name not found", - spec: infrav1.OpenStackMachineSpec{ + spec: infrav1alpha1.OpenStackServerSpec{ Image: infrav1.ImageParam{ Filter: &infrav1.ImageFilter{ Name: ptr.To("test-image"), }, }, + Ports: defaultPortOpts, }, expectImageMock: func(m *mock.MockImageClientMockRecorder) { m.ListImages(images.ListOpts{Name: "test-image"}).Return([]images.Image{}, nil) }, - want: &infrav1.ResolvedMachineSpec{}, + want: &infrav1alpha1.ResolvedServerSpec{}, wantErr: true, }, { - testName: "Ports set", - spec: infrav1.OpenStackMachineSpec{ + testName: "Resolved ports length mismatch", + spec: infrav1alpha1.OpenStackServerSpec{ Image: infrav1.ImageParam{ID: ptr.To(imageID1)}, - Ports: []infrav1.PortOpts{ - { - Network: 
&infrav1.NetworkParam{ - ID: ptr.To(networkID2), - }, - }, - }, - }, - want: &infrav1.ResolvedMachineSpec{ - ImageID: imageID1, - Ports: []infrav1.ResolvedPortSpec{ - { - Name: "test-instance-0", - Description: "Created by cluster-api-provider-openstack cluster test-cluster", - NetworkID: networkID2, - }, - }, + Ports: slices.Concat(defaultPortOpts, defaultPortOpts), }, + want: &infrav1alpha1.ResolvedServerSpec{}, + wantErr: true, }, } for i, tt := range tests { @@ -168,30 +145,25 @@ func Test_ResolveMachineSpec(t *testing.T) { tt.expectNetworkMock(mockScopeFactory.NetworkClient.EXPECT()) } - openStackCluster := &infrav1.OpenStackCluster{ - Status: infrav1.OpenStackClusterStatus{ - Network: &infrav1.NetworkStatusWithSubnets{ - NetworkStatus: infrav1.NetworkStatus{ - ID: networkID1, - }, - Subnets: []infrav1.Subnet{ - { - ID: subnetID, - }, - }, - }, - }, - } - resources := tt.before if resources == nil { - resources = &infrav1.ResolvedMachineSpec{} + resources = &infrav1alpha1.ResolvedServerSpec{} + } + openStackServer := &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-instance", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test-cluster", + }, + }, + Spec: tt.spec, + Status: infrav1alpha1.OpenStackServerStatus{ + Resolved: resources, + }, } - clusterResourceName := "test-cluster" - baseName := "test-instance" scope := scope.NewWithLogger(mockScopeFactory, log) - _, err := ResolveMachineSpec(scope, &tt.spec, resources, clusterResourceName, baseName, openStackCluster, tt.managedSecurityGroup) + _, err := ResolveServerSpec(scope, openStackServer) if tt.wantErr { g.Expect(err).Error() return diff --git a/pkg/cloud/services/compute/machine_resources.go b/pkg/cloud/services/compute/server_resources.go similarity index 73% rename from pkg/cloud/services/compute/machine_resources.go rename to pkg/cloud/services/compute/server_resources.go index 6619d2ea72..c9db2f1bab 100644 --- a/pkg/cloud/services/compute/machine_resources.go 
+++ b/pkg/cloud/services/compute/server_resources.go @@ -17,16 +17,16 @@ limitations under the License. package compute import ( - infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) -func AdoptMachineResources(scope *scope.WithLogger, resolved *infrav1.ResolvedMachineSpec, resources *infrav1.MachineResources) error { +func AdoptServerResources(scope *scope.WithLogger, resolved *infrav1alpha1.ResolvedServerSpec, resources *infrav1alpha1.ServerResources) error { networkingService, err := networking.NewService(scope) if err != nil { return err } - return networkingService.AdoptPorts(scope, resolved.Ports, resources) + return networkingService.AdoptPortsServer(scope, resolved.Ports, resources) } diff --git a/pkg/cloud/services/networking/port.go b/pkg/cloud/services/networking/port.go index d17fa18c06..2603c11a8a 100644 --- a/pkg/cloud/services/networking/port.go +++ b/pkg/cloud/services/networking/port.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/ptr" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/record" "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" @@ -324,7 +325,7 @@ func getPortName(baseName string, portSpec *infrav1.PortOpts, netIndex int) stri return fmt.Sprintf("%s-%d", baseName, netIndex) } -func (s *Service) CreatePorts(eventObject runtime.Object, desiredPorts []infrav1.ResolvedPortSpec, resources *infrav1.MachineResources) error { +func (s *Service) CreatePorts(eventObject runtime.Object, desiredPorts []infrav1.ResolvedPortSpec, resources *infrav1alpha1.ServerResources) error { for i := range desiredPorts { // Skip creation of ports which already exist if i < 
len(resources.Ports) { @@ -551,9 +552,10 @@ func (s *Service) IsTrunkExtSupported() (trunknSupported bool, err error) { return true, nil } -// AdoptPorts looks for ports in desiredPorts which were previously created, and adds them to resources.Ports. +// AdoptPortsServer looks for ports in desiredPorts which were previously created, and adds them to resources.Ports. // A port matches if it has the same name and network ID as the desired port. -func (s *Service) AdoptPorts(scope *scope.WithLogger, desiredPorts []infrav1.ResolvedPortSpec, resources *infrav1.MachineResources) error { +// TODO(emilien): remove this function: https://github.com/kubernetes-sigs/cluster-api-provider-openstack/pull/2071 +func (s *Service) AdoptPortsServer(scope *scope.WithLogger, desiredPorts []infrav1.ResolvedPortSpec, resources *infrav1alpha1.ServerResources) error { // We can skip adoption if the ports are already in the status if len(desiredPorts) == len(resources.Ports) { return nil diff --git a/pkg/cloud/services/networking/port_test.go b/pkg/cloud/services/networking/port_test.go index 7fd6ec79fd..756041819f 100644 --- a/pkg/cloud/services/networking/port_test.go +++ b/pkg/cloud/services/networking/port_test.go @@ -35,6 +35,7 @@ import ( "go.uber.org/mock/gomock" "k8s.io/utils/ptr" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" @@ -876,7 +877,7 @@ func Test_getPortName(t *testing.T) { } } -func Test_AdoptPorts(t *testing.T) { +func Test_AdoptPortsServer(t *testing.T) { const ( networkID1 = "5e8e0d3b-7f3d-4f3e-8b3f-3e3e3e3e3e3e" networkID2 = "0a4ff38e-1e03-4b4e-994c-c8ae38a2915e" @@ -888,9 +889,9 @@ func Test_AdoptPorts(t *testing.T) { tests := []struct { testName string desiredPorts []infrav1.ResolvedPortSpec - resources infrav1.MachineResources + resources 
infrav1alpha1.ServerResources expect func(*mock.MockNetworkClientMockRecorder) - want infrav1.MachineResources + want infrav1alpha1.ServerResources wantErr bool }{ { @@ -901,14 +902,14 @@ func Test_AdoptPorts(t *testing.T) { desiredPorts: []infrav1.ResolvedPortSpec{ {NetworkID: networkID1}, }, - resources: infrav1.MachineResources{ + resources: infrav1alpha1.ServerResources{ Ports: []infrav1.PortStatus{ { ID: portID1, }, }, }, - want: infrav1.MachineResources{ + want: infrav1alpha1.ServerResources{ Ports: []infrav1.PortStatus{ { ID: portID1, @@ -916,106 +917,6 @@ func Test_AdoptPorts(t *testing.T) { }, }, }, - { - testName: "desired port not in status, exists: adopt", - desiredPorts: []infrav1.ResolvedPortSpec{ - {Name: "test-machine-0", NetworkID: networkID1}, - }, - expect: func(m *mock.MockNetworkClientMockRecorder) { - m.ListPort(ports.ListOpts{Name: "test-machine-0", NetworkID: networkID1}). - Return([]ports.Port{{ID: portID1}}, nil) - }, - want: infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: portID1, - }, - }, - }, - }, - { - testName: "desired port not in status, does not exist: ignore", - desiredPorts: []infrav1.ResolvedPortSpec{ - {Name: "test-machine-0", NetworkID: networkID1}, - }, - expect: func(m *mock.MockNetworkClientMockRecorder) { - m.ListPort(ports.ListOpts{Name: "test-machine-0", NetworkID: networkID1}). - Return(nil, nil) - }, - want: infrav1.MachineResources{}, - }, - { - testName: "2 desired ports, first in status, second exists: adopt second", - desiredPorts: []infrav1.ResolvedPortSpec{ - {Name: "test-machine-0", NetworkID: networkID1}, - {Name: "test-machine-1", NetworkID: networkID2}, - }, - resources: infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: portID1, - }, - }, - }, - expect: func(m *mock.MockNetworkClientMockRecorder) { - m.ListPort(ports.ListOpts{Name: "test-machine-1", NetworkID: networkID2}). 
- Return([]ports.Port{{ID: portID2}}, nil) - }, - want: infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - {ID: portID1}, - {ID: portID2}, - }, - }, - }, - { - testName: "3 desired ports, first in status, second does not exist: ignore, do no look for third", - desiredPorts: []infrav1.ResolvedPortSpec{ - {Name: "test-machine-0", NetworkID: networkID1}, - {Name: "test-machine-1", NetworkID: networkID2}, - {Name: "test-machine-2", NetworkID: networkID3}, - }, - resources: infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: portID1, - }, - }, - }, - expect: func(m *mock.MockNetworkClientMockRecorder) { - m.ListPort(ports.ListOpts{Name: "test-machine-1", NetworkID: networkID2}). - Return(nil, nil) - }, - want: infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - {ID: portID1}, - }, - }, - }, - { - testName: "3 desired ports with arbitrary names, first in status, second does not exist: ignore, do no look for third", - desiredPorts: []infrav1.ResolvedPortSpec{ - {Name: "test-machine-foo", NetworkID: networkID1}, - {Name: "test-machine-bar", NetworkID: networkID2}, - {Name: "test-machine-baz", NetworkID: networkID3}, - }, - resources: infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - { - ID: portID1, - }, - }, - }, - expect: func(m *mock.MockNetworkClientMockRecorder) { - m.ListPort(ports.ListOpts{Name: "test-machine-bar", NetworkID: networkID2}). 
- Return(nil, nil) - }, - want: infrav1.MachineResources{ - Ports: []infrav1.PortStatus{ - {ID: portID1}, - }, - }, - }, } for i := range tests { tt := &tests[i] @@ -1034,7 +935,7 @@ func Test_AdoptPorts(t *testing.T) { client: mockClient, } - err := s.AdoptPorts(scope.NewWithLogger(mockScopeFactory, log), + err := s.AdoptPortsServer(scope.NewWithLogger(mockScopeFactory, log), tt.desiredPorts, &tt.resources) if tt.wantErr { g.Expect(err).Error() diff --git a/pkg/generated/applyconfiguration/api/v1alpha1/openstackserver.go b/pkg/generated/applyconfiguration/api/v1alpha1/openstackserver.go new file mode 100644 index 0000000000..0f322cce0e --- /dev/null +++ b/pkg/generated/applyconfiguration/api/v1alpha1/openstackserver.go @@ -0,0 +1,258 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" + apiv1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + internal "sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/internal" +) + +// OpenStackServerApplyConfiguration represents an declarative configuration of the OpenStackServer type for use +// with apply. 
+type OpenStackServerApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OpenStackServerSpecApplyConfiguration `json:"spec,omitempty"` + Status *OpenStackServerStatusApplyConfiguration `json:"status,omitempty"` +} + +// OpenStackServer constructs an declarative configuration of the OpenStackServer type for use with +// apply. +func OpenStackServer(name, namespace string) *OpenStackServerApplyConfiguration { + b := &OpenStackServerApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("OpenStackServer") + b.WithAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha1") + return b +} + +// ExtractOpenStackServer extracts the applied configuration owned by fieldManager from +// openStackServer. If no managedFields are found in openStackServer for fieldManager, a +// OpenStackServerApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// openStackServer must be a unmodified OpenStackServer API object that was retrieved from the Kubernetes API. +// ExtractOpenStackServer provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractOpenStackServer(openStackServer *apiv1alpha1.OpenStackServer, fieldManager string) (*OpenStackServerApplyConfiguration, error) { + return extractOpenStackServer(openStackServer, fieldManager, "") +} + +// ExtractOpenStackServerStatus is the same as ExtractOpenStackServer except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOpenStackServerStatus(openStackServer *apiv1alpha1.OpenStackServer, fieldManager string) (*OpenStackServerApplyConfiguration, error) { + return extractOpenStackServer(openStackServer, fieldManager, "status") +} + +func extractOpenStackServer(openStackServer *apiv1alpha1.OpenStackServer, fieldManager string, subresource string) (*OpenStackServerApplyConfiguration, error) { + b := &OpenStackServerApplyConfiguration{} + err := managedfields.ExtractInto(openStackServer, internal.Parser().Type("io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.OpenStackServer"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(openStackServer.Name) + b.WithNamespace(openStackServer.Namespace) + + b.WithKind("OpenStackServer") + b.WithAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithKind(value string) *OpenStackServerApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *OpenStackServerApplyConfiguration) WithAPIVersion(value string) *OpenStackServerApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithName(value string) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithGenerateName(value string) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithNamespace(value string) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *OpenStackServerApplyConfiguration) WithUID(value types.UID) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithResourceVersion(value string) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithGeneration(value int64) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *OpenStackServerApplyConfiguration) WithLabels(entries map[string]string) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *OpenStackServerApplyConfiguration) WithAnnotations(entries map[string]string) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OpenStackServerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *OpenStackServerApplyConfiguration) WithFinalizers(values ...string) *OpenStackServerApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *OpenStackServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithSpec(value *OpenStackServerSpecApplyConfiguration) *OpenStackServerApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OpenStackServerApplyConfiguration) WithStatus(value *OpenStackServerStatusApplyConfiguration) *OpenStackServerApplyConfiguration { + b.Status = value + return b +} diff --git a/pkg/generated/applyconfiguration/api/v1alpha1/openstackserverspec.go b/pkg/generated/applyconfiguration/api/v1alpha1/openstackserverspec.go new file mode 100644 index 0000000000..d07a3bbdbc --- /dev/null +++ b/pkg/generated/applyconfiguration/api/v1alpha1/openstackserverspec.go @@ -0,0 +1,201 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + v1beta1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/api/v1beta1" +) + +// OpenStackServerSpecApplyConfiguration represents an declarative configuration of the OpenStackServerSpec type for use +// with apply. +type OpenStackServerSpecApplyConfiguration struct { + AdditionalBlockDevices []v1beta1.AdditionalBlockDeviceApplyConfiguration `json:"additionalBlockDevices,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + ConfigDrive *bool `json:"configDrive,omitempty"` + Flavor *string `json:"flavor,omitempty"` + FloatingIPPoolRef *v1.TypedLocalObjectReference `json:"floatingIPPoolRef,omitempty"` + IdentityRef *v1beta1.OpenStackIdentityReferenceApplyConfiguration `json:"identityRef,omitempty"` + Image *v1beta1.ImageParamApplyConfiguration `json:"image,omitempty"` + Ports []v1beta1.PortOptsApplyConfiguration `json:"ports,omitempty"` + RootVolume *v1beta1.RootVolumeApplyConfiguration `json:"rootVolume,omitempty"` + SSHKeyName *string `json:"sshKeyName,omitempty"` + SecurityGroups []v1beta1.SecurityGroupParamApplyConfiguration `json:"securityGroups,omitempty"` + ServerGroup *v1beta1.ServerGroupParamApplyConfiguration `json:"serverGroup,omitempty"` + ServerMetadata []v1beta1.ServerMetadataApplyConfiguration `json:"serverMetadata,omitempty"` + Tags []string `json:"tags,omitempty"` + Trunk *bool `json:"trunk,omitempty"` + UserDataRef *v1.LocalObjectReference `json:"userDataRef,omitempty"` +} + +// 
OpenStackServerSpecApplyConfiguration constructs an declarative configuration of the OpenStackServerSpec type for use with +// apply. +func OpenStackServerSpec() *OpenStackServerSpecApplyConfiguration { + return &OpenStackServerSpecApplyConfiguration{} +} + +// WithAdditionalBlockDevices adds the given value to the AdditionalBlockDevices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AdditionalBlockDevices field. +func (b *OpenStackServerSpecApplyConfiguration) WithAdditionalBlockDevices(values ...*v1beta1.AdditionalBlockDeviceApplyConfiguration) *OpenStackServerSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAdditionalBlockDevices") + } + b.AdditionalBlockDevices = append(b.AdditionalBlockDevices, *values[i]) + } + return b +} + +// WithAvailabilityZone sets the AvailabilityZone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailabilityZone field is set to the value of the last call. +func (b *OpenStackServerSpecApplyConfiguration) WithAvailabilityZone(value string) *OpenStackServerSpecApplyConfiguration { + b.AvailabilityZone = &value + return b +} + +// WithConfigDrive sets the ConfigDrive field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigDrive field is set to the value of the last call. 
+func (b *OpenStackServerSpecApplyConfiguration) WithConfigDrive(value bool) *OpenStackServerSpecApplyConfiguration { + b.ConfigDrive = &value + return b +} + +// WithFlavor sets the Flavor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Flavor field is set to the value of the last call. +func (b *OpenStackServerSpecApplyConfiguration) WithFlavor(value string) *OpenStackServerSpecApplyConfiguration { + b.Flavor = &value + return b +} + +// WithFloatingIPPoolRef sets the FloatingIPPoolRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FloatingIPPoolRef field is set to the value of the last call. +func (b *OpenStackServerSpecApplyConfiguration) WithFloatingIPPoolRef(value v1.TypedLocalObjectReference) *OpenStackServerSpecApplyConfiguration { + b.FloatingIPPoolRef = &value + return b +} + +// WithIdentityRef sets the IdentityRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IdentityRef field is set to the value of the last call. +func (b *OpenStackServerSpecApplyConfiguration) WithIdentityRef(value *v1beta1.OpenStackIdentityReferenceApplyConfiguration) *OpenStackServerSpecApplyConfiguration { + b.IdentityRef = value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. 
func (b *OpenStackServerSpecApplyConfiguration) WithImage(value *v1beta1.ImageParamApplyConfiguration) *OpenStackServerSpecApplyConfiguration {
	b.Image = value
	return b
}

// WithPorts adds the given value to the Ports field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Ports field.
func (b *OpenStackServerSpecApplyConfiguration) WithPorts(values ...*v1beta1.PortOptsApplyConfiguration) *OpenStackServerSpecApplyConfiguration {
	for i := range values {
		if values[i] == nil {
			panic("nil value passed to WithPorts")
		}
		b.Ports = append(b.Ports, *values[i])
	}
	return b
}

// WithRootVolume sets the RootVolume field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RootVolume field is set to the value of the last call.
func (b *OpenStackServerSpecApplyConfiguration) WithRootVolume(value *v1beta1.RootVolumeApplyConfiguration) *OpenStackServerSpecApplyConfiguration {
	b.RootVolume = value
	return b
}

// WithSSHKeyName sets the SSHKeyName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SSHKeyName field is set to the value of the last call.
func (b *OpenStackServerSpecApplyConfiguration) WithSSHKeyName(value string) *OpenStackServerSpecApplyConfiguration {
	b.SSHKeyName = &value
	return b
}

// WithSecurityGroups adds the given value to the SecurityGroups field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the SecurityGroups field.
func (b *OpenStackServerSpecApplyConfiguration) WithSecurityGroups(values ...*v1beta1.SecurityGroupParamApplyConfiguration) *OpenStackServerSpecApplyConfiguration {
	for i := range values {
		if values[i] == nil {
			panic("nil value passed to WithSecurityGroups")
		}
		b.SecurityGroups = append(b.SecurityGroups, *values[i])
	}
	return b
}

// WithServerGroup sets the ServerGroup field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ServerGroup field is set to the value of the last call.
func (b *OpenStackServerSpecApplyConfiguration) WithServerGroup(value *v1beta1.ServerGroupParamApplyConfiguration) *OpenStackServerSpecApplyConfiguration {
	b.ServerGroup = value
	return b
}

// WithServerMetadata adds the given value to the ServerMetadata field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the ServerMetadata field.
func (b *OpenStackServerSpecApplyConfiguration) WithServerMetadata(values ...*v1beta1.ServerMetadataApplyConfiguration) *OpenStackServerSpecApplyConfiguration {
	for i := range values {
		if values[i] == nil {
			panic("nil value passed to WithServerMetadata")
		}
		b.ServerMetadata = append(b.ServerMetadata, *values[i])
	}
	return b
}

// WithTags adds the given value to the Tags field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Tags field.
func (b *OpenStackServerSpecApplyConfiguration) WithTags(values ...string) *OpenStackServerSpecApplyConfiguration {
	for i := range values {
		b.Tags = append(b.Tags, values[i])
	}
	return b
}

// WithTrunk sets the Trunk field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Trunk field is set to the value of the last call.
func (b *OpenStackServerSpecApplyConfiguration) WithTrunk(value bool) *OpenStackServerSpecApplyConfiguration {
	b.Trunk = &value
	return b
}

// WithUserDataRef sets the UserDataRef field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UserDataRef field is set to the value of the last call.
func (b *OpenStackServerSpecApplyConfiguration) WithUserDataRef(value v1.LocalObjectReference) *OpenStackServerSpecApplyConfiguration {
	b.UserDataRef = &value
	return b
}
diff --git a/pkg/generated/applyconfiguration/api/v1alpha1/openstackserverstatus.go b/pkg/generated/applyconfiguration/api/v1alpha1/openstackserverstatus.go
new file mode 100644
index 0000000000..a1312a352c
--- /dev/null
+++ b/pkg/generated/applyconfiguration/api/v1alpha1/openstackserverstatus.go
@@ -0,0 +1,101 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by applyconfiguration-gen. DO NOT EDIT.

package v1alpha1

import (
	v1 "k8s.io/api/core/v1"
	v1beta1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
	apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// OpenStackServerStatusApplyConfiguration represents a declarative configuration of the OpenStackServerStatus type for use
// with apply.
type OpenStackServerStatusApplyConfiguration struct {
	Ready         *bool                                 `json:"ready,omitempty"`
	InstanceID    *string                               `json:"instanceID,omitempty"`
	InstanceState *v1beta1.InstanceState                `json:"instanceState,omitempty"`
	Addresses     []v1.NodeAddress                      `json:"addresses,omitempty"`
	Resolved      *ResolvedServerSpecApplyConfiguration `json:"resolved,omitempty"`
	Resources     *ServerResourcesApplyConfiguration    `json:"resources,omitempty"`
	Conditions    *apiv1beta1.Conditions                `json:"conditions,omitempty"`
}

// OpenStackServerStatusApplyConfiguration constructs a declarative configuration of the OpenStackServerStatus type for use with
// apply.
func OpenStackServerStatus() *OpenStackServerStatusApplyConfiguration {
	return &OpenStackServerStatusApplyConfiguration{}
}

// WithReady sets the Ready field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Ready field is set to the value of the last call.
func (b *OpenStackServerStatusApplyConfiguration) WithReady(value bool) *OpenStackServerStatusApplyConfiguration {
	b.Ready = &value
	return b
}

// WithInstanceID sets the InstanceID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the InstanceID field is set to the value of the last call.
func (b *OpenStackServerStatusApplyConfiguration) WithInstanceID(value string) *OpenStackServerStatusApplyConfiguration {
	b.InstanceID = &value
	return b
}

// WithInstanceState sets the InstanceState field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the InstanceState field is set to the value of the last call.
func (b *OpenStackServerStatusApplyConfiguration) WithInstanceState(value v1beta1.InstanceState) *OpenStackServerStatusApplyConfiguration {
	b.InstanceState = &value
	return b
}

// WithAddresses adds the given value to the Addresses field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Addresses field.
func (b *OpenStackServerStatusApplyConfiguration) WithAddresses(values ...v1.NodeAddress) *OpenStackServerStatusApplyConfiguration {
	for i := range values {
		b.Addresses = append(b.Addresses, values[i])
	}
	return b
}

// WithResolved sets the Resolved field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resolved field is set to the value of the last call.
func (b *OpenStackServerStatusApplyConfiguration) WithResolved(value *ResolvedServerSpecApplyConfiguration) *OpenStackServerStatusApplyConfiguration {
	b.Resolved = value
	return b
}

// WithResources sets the Resources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resources field is set to the value of the last call.
func (b *OpenStackServerStatusApplyConfiguration) WithResources(value *ServerResourcesApplyConfiguration) *OpenStackServerStatusApplyConfiguration {
	b.Resources = value
	return b
}

// WithConditions sets the Conditions field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Conditions field is set to the value of the last call.
func (b *OpenStackServerStatusApplyConfiguration) WithConditions(value apiv1beta1.Conditions) *OpenStackServerStatusApplyConfiguration {
	b.Conditions = &value
	return b
}
diff --git a/pkg/generated/applyconfiguration/api/v1alpha1/resolvedserverspec.go b/pkg/generated/applyconfiguration/api/v1alpha1/resolvedserverspec.go
new file mode 100644
index 0000000000..a4e2fe1805
--- /dev/null
+++ b/pkg/generated/applyconfiguration/api/v1alpha1/resolvedserverspec.go
@@ -0,0 +1,66 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by applyconfiguration-gen. DO NOT EDIT.

package v1alpha1

import (
	v1beta1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/api/v1beta1"
)

// ResolvedServerSpecApplyConfiguration represents a declarative configuration of the ResolvedServerSpec type for use
// with apply.
type ResolvedServerSpecApplyConfiguration struct {
	ServerGroupID *string                                      `json:"serverGroupID,omitempty"`
	ImageID       *string                                      `json:"imageID,omitempty"`
	Ports         []v1beta1.ResolvedPortSpecApplyConfiguration `json:"ports,omitempty"`
}

// ResolvedServerSpecApplyConfiguration constructs a declarative configuration of the ResolvedServerSpec type for use with
// apply.
func ResolvedServerSpec() *ResolvedServerSpecApplyConfiguration {
	return &ResolvedServerSpecApplyConfiguration{}
}

// WithServerGroupID sets the ServerGroupID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ServerGroupID field is set to the value of the last call.
func (b *ResolvedServerSpecApplyConfiguration) WithServerGroupID(value string) *ResolvedServerSpecApplyConfiguration {
	b.ServerGroupID = &value
	return b
}

// WithImageID sets the ImageID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ImageID field is set to the value of the last call.
func (b *ResolvedServerSpecApplyConfiguration) WithImageID(value string) *ResolvedServerSpecApplyConfiguration {
	b.ImageID = &value
	return b
}

// WithPorts adds the given value to the Ports field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Ports field.
func (b *ResolvedServerSpecApplyConfiguration) WithPorts(values ...*v1beta1.ResolvedPortSpecApplyConfiguration) *ResolvedServerSpecApplyConfiguration {
	for i := range values {
		if values[i] == nil {
			panic("nil value passed to WithPorts")
		}
		b.Ports = append(b.Ports, *values[i])
	}
	return b
}
diff --git a/pkg/generated/applyconfiguration/api/v1alpha1/serverresources.go b/pkg/generated/applyconfiguration/api/v1alpha1/serverresources.go
new file mode 100644
index 0000000000..9af6f6aba3
--- /dev/null
+++ b/pkg/generated/applyconfiguration/api/v1alpha1/serverresources.go
@@ -0,0 +1,48 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by applyconfiguration-gen. DO NOT EDIT.

package v1alpha1

import (
	v1beta1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/api/v1beta1"
)

// ServerResourcesApplyConfiguration represents a declarative configuration of the ServerResources type for use
// with apply.
type ServerResourcesApplyConfiguration struct {
	Ports []v1beta1.PortStatusApplyConfiguration `json:"ports,omitempty"`
}

// ServerResourcesApplyConfiguration constructs a declarative configuration of the ServerResources type for use with
// apply.
+func ServerResources() *ServerResourcesApplyConfiguration { + return &ServerResourcesApplyConfiguration{} +} + +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. +func (b *ServerResourcesApplyConfiguration) WithPorts(values ...*v1beta1.PortStatusApplyConfiguration) *ServerResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } + return b +} diff --git a/pkg/generated/applyconfiguration/internal/internal.go b/pkg/generated/applyconfiguration/internal/internal.go index fda918a270..10e3a491be 100644 --- a/pkg/generated/applyconfiguration/internal/internal.go +++ b/pkg/generated/applyconfiguration/internal/internal.go @@ -39,6 +39,13 @@ func Parser() *typed.Parser { var parserOnce sync.Once var parser *typed.Parser var schemaYAML = typed.YAMLObject(`types: +- name: io.k8s.api.core.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + elementRelationship: atomic - name: io.k8s.api.core.v1.NodeAddress map: fields: @@ -192,6 +199,156 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic - name: io.k8s.apimachinery.pkg.apis.meta.v1.Time scalar: untyped +- name: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.OpenStackServer + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.OpenStackServerSpec + default: {} + - name: status + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.OpenStackServerStatus + default: {} +- 
name: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.OpenStackServerSpec + map: + fields: + - name: additionalBlockDevices + type: + list: + elementType: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.AdditionalBlockDevice + elementRelationship: associative + keys: + - name + - name: availabilityZone + type: + scalar: string + - name: configDrive + type: + scalar: boolean + - name: flavor + type: + scalar: string + default: "" + - name: floatingIPPoolRef + type: + namedType: io.k8s.api.core.v1.TypedLocalObjectReference + - name: identityRef + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.OpenStackIdentityReference + default: {} + - name: image + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.ImageParam + default: {} + - name: ports + type: + list: + elementType: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.PortOpts + elementRelationship: atomic + - name: rootVolume + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.RootVolume + - name: securityGroups + type: + list: + elementType: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.SecurityGroupParam + elementRelationship: atomic + - name: serverGroup + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.ServerGroupParam + - name: serverMetadata + type: + list: + elementType: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.ServerMetadata + elementRelationship: associative + keys: + - key + - name: sshKeyName + type: + scalar: string + default: "" + - name: tags + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: trunk + type: + scalar: boolean + - name: userDataRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference +- name: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.OpenStackServerStatus + map: + fields: + - name: addresses + type: + list: + elementType: + 
namedType: io.k8s.api.core.v1.NodeAddress + elementRelationship: atomic + - name: conditions + type: + list: + elementType: + namedType: io.k8s.sigs.cluster-api.api.v1beta1.Condition + elementRelationship: atomic + - name: instanceID + type: + scalar: string + - name: instanceState + type: + scalar: string + - name: ready + type: + scalar: boolean + default: false + - name: resolved + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.ResolvedServerSpec + - name: resources + type: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.ServerResources +- name: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.ResolvedServerSpec + map: + fields: + - name: imageID + type: + scalar: string + - name: ports + type: + list: + elementType: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.ResolvedPortSpec + elementRelationship: atomic + - name: serverGroupID + type: + scalar: string +- name: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha1.ServerResources + map: + fields: + - name: ports + type: + list: + elementType: + namedType: io.k8s.sigs.cluster-api-provider-openstack.api.v1beta1.PortStatus + elementRelationship: atomic - name: io.k8s.sigs.cluster-api-provider-openstack.api.v1alpha6.APIServerLoadBalancer map: fields: diff --git a/pkg/generated/applyconfiguration/utils.go b/pkg/generated/applyconfiguration/utils.go index 8dcd990c57..4ca2627559 100644 --- a/pkg/generated/applyconfiguration/utils.go +++ b/pkg/generated/applyconfiguration/utils.go @@ -20,9 +20,11 @@ package applyconfiguration import ( schema "k8s.io/apimachinery/pkg/runtime/schema" + v1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" v1alpha6 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6" v1alpha7 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" v1beta1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" + apiv1alpha1 
"sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/api/v1alpha1" apiv1alpha6 "sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/api/v1alpha6" apiv1alpha7 "sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/api/v1alpha7" apiv1beta1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/generated/applyconfiguration/api/v1beta1" @@ -32,7 +34,19 @@ import ( // apply configuration type exists for the given GroupVersionKind. func ForKind(kind schema.GroupVersionKind) interface{} { switch kind { - // Group=infrastructure.cluster.x-k8s.io, Version=v1alpha6 + // Group=infrastructure.cluster.x-k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithKind("OpenStackServer"): + return &apiv1alpha1.OpenStackServerApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("OpenStackServerSpec"): + return &apiv1alpha1.OpenStackServerSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("OpenStackServerStatus"): + return &apiv1alpha1.OpenStackServerStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ResolvedServerSpec"): + return &apiv1alpha1.ResolvedServerSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ServerResources"): + return &apiv1alpha1.ServerResourcesApplyConfiguration{} + + // Group=infrastructure.cluster.x-k8s.io, Version=v1alpha6 case v1alpha6.SchemeGroupVersion.WithKind("AddressPair"): return &apiv1alpha6.AddressPairApplyConfiguration{} case v1alpha6.SchemeGroupVersion.WithKind("APIServerLoadBalancer"): diff --git a/pkg/webhooks/fuzz_test.go b/pkg/webhooks/fuzz_test.go index c4e246a78e..67f9abc98a 100644 --- a/pkg/webhooks/fuzz_test.go +++ b/pkg/webhooks/fuzz_test.go @@ -27,6 +27,7 @@ import ( utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/controller-runtime/pkg/webhook" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" ) 
@@ -90,3 +91,7 @@ func Test_FuzzMachineWebhook(t *testing.T) { func Test_FuzzMachineTemplateWebhook(t *testing.T) { fuzzCustomValidator[infrav1.OpenStackMachineTemplate](t, "OpenStackMachineTemplate", &openStackMachineTemplateWebhook{}) } + +func Test_FuzzServerWebhook(t *testing.T) { + fuzzCustomValidator[infrav1alpha1.OpenStackServer](t, "OpenStackServer", &openStackServerWebhook{}) +} diff --git a/pkg/webhooks/openstackserver_webhook.go b/pkg/webhooks/openstackserver_webhook.go new file mode 100644 index 0000000000..334c60a798 --- /dev/null +++ b/pkg/webhooks/openstackserver_webhook.go @@ -0,0 +1,130 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhooks + +import ( + "context" + "fmt" + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/cluster-api/util/topology" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" +) + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha1-openstackserver,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=openstackservers,versions=v1alpha1,name=validation.openstackserver.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1 + +func SetupOpenStackServerWebhook(mgr manager.Manager) error { + return builder.WebhookManagedBy(mgr). + For(&infrav1alpha1.OpenStackServer{}). + WithValidator(&openStackServerWebhook{}). + Complete() +} + +type openStackServerWebhook struct{} + +// Compile-time assertion that openStackServerWebhook implements webhook.CustomValidator. +var _ webhook.CustomValidator = &openStackServerWebhook{} + +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type. 
+func (*openStackServerWebhook) ValidateCreate(_ context.Context, objRaw runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + newObj, err := castToOpenStackServer(objRaw) + if err != nil { + return nil, err + } + + if newObj.Spec.RootVolume != nil && newObj.Spec.AdditionalBlockDevices != nil { + for _, device := range newObj.Spec.AdditionalBlockDevices { + if device.Name == "root" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "additionalBlockDevices"), "cannot contain a device named \"root\" when rootVolume is set")) + } + } + } + + return aggregateObjErrors(newObj.GroupVersionKind().GroupKind(), newObj.Name, allErrs) +} + +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type. +func (*openStackServerWebhook) ValidateUpdate(ctx context.Context, oldObjRaw, newObjRaw runtime.Object) (admission.Warnings, error) { + oldObj, err := castToOpenStackServer(oldObjRaw) + if err != nil { + return nil, err + } + + newObj, err := castToOpenStackServer(newObjRaw) + if err != nil { + return nil, err + } + + req, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a admission.Request inside context: %v", err)) + } + + newOpenStackServer, err := runtime.DefaultUnstructuredConverter.ToUnstructured(newObj) + if err != nil { + return nil, apierrors.NewInvalid(infrav1.SchemeGroupVersion.WithKind("OpenStackServer").GroupKind(), newObj.Name, field.ErrorList{ + field.InternalError(nil, fmt.Errorf("failed to convert new OpenStackServer to unstructured object: %w", err)), + }) + } + oldOpenStackServer, err := runtime.DefaultUnstructuredConverter.ToUnstructured(oldObjRaw) + if err != nil { + return nil, apierrors.NewInvalid(infrav1.SchemeGroupVersion.WithKind("OpenStackServer").GroupKind(), newObj.Name, field.ErrorList{ + field.InternalError(nil, fmt.Errorf("failed to convert old OpenStackServer to unstructured object: %w", err)), + }) 
+ } + + var allErrs field.ErrorList + + newOpenStackServerSpec := newOpenStackServer["spec"].(map[string]interface{}) + oldOpenStackServerSpec := oldOpenStackServer["spec"].(map[string]interface{}) + + // allow changes to identifyRef + delete(oldOpenStackServerSpec, "identityRef") + delete(newOpenStackServerSpec, "identityRef") + + if !topology.ShouldSkipImmutabilityChecks(req, newObj) && + !reflect.DeepEqual(newObj.Spec, oldObj.Spec) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec"), "OpenStackServer spec field is immutable. Please create a new resource instead."), + ) + } + + return aggregateObjErrors(newObj.GroupVersionKind().GroupKind(), newObj.Name, allErrs) +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type. +func (*openStackServerWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +func castToOpenStackServer(obj runtime.Object) (*infrav1alpha1.OpenStackServer, error) { + cast, ok := obj.(*infrav1alpha1.OpenStackServer) + if !ok { + return nil, fmt.Errorf("expected an OpenStackServer but got a %T", obj) + } + return cast, nil +} diff --git a/pkg/webhooks/openstackserver_webhook_test.go b/pkg/webhooks/openstackserver_webhook_test.go new file mode 100644 index 0000000000..d82c3f9046 --- /dev/null +++ b/pkg/webhooks/openstackserver_webhook_test.go @@ -0,0 +1,132 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhooks + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" //nolint:revive + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" +) + +func TestOpenStackServer_ValidateUpdate(t *testing.T) { + g := NewWithT(t) + + tests := []struct { + name string + old *infrav1alpha1.OpenStackServer + new *infrav1alpha1.OpenStackServer + req *admission.Request + wantErr bool + }{ + { + name: "OpenStackServer with immutable spec", + old: &infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "foo", + }, + }, + new: &infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "new", + }, + }, + req: &admission.Request{}, + wantErr: true, + }, + { + name: "OpenStackServer with mutable metadata", + old: &infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "foo", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + new: &infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "foo", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + }, + req: &admission.Request{}, + }, + { + name: "don't allow modification, dry run, no skip immutability annotation set", + old: &infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "foo", + }, + }, + new: &infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "new", + }, + }, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, + wantErr: true, + }, + { + name: "allow modification, dry run, skip immutability annotation set", + old: 
&infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "foo", + }, + }, + new: &infrav1alpha1.OpenStackServer{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.TopologyDryRunAnnotation: "", + }, + }, + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "new", + }, + }, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + webhook := &openStackServerWebhook{} + ctx := admission.NewContextWithRequest(context.Background(), *tt.req) + + warn, err := webhook.ValidateUpdate(ctx, tt.old, tt.new) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + // Nothing emits warnings yet + g.Expect(warn).To(BeEmpty()) + }) + } +} diff --git a/pkg/webhooks/register.go b/pkg/webhooks/register.go index 02ad4608ed..b2b7fd13dc 100644 --- a/pkg/webhooks/register.go +++ b/pkg/webhooks/register.go @@ -39,6 +39,7 @@ func RegisterAllWithManager(mgr manager.Manager) []error { {"OpenStackClusterTemplate", SetupOpenStackClusterTemplateWebhook}, {"OpenStackMachine", SetupOpenStackMachineWebhook}, {"OpenStackMachineTemplate", SetupOpenStackMachineTemplateWebhook}, + {"OpenStackServer", SetupOpenStackServerWebhook}, } { if err := webhook.setup(mgr); err != nil { errs = append(errs, fmt.Errorf("creating webhook for %s: %v", webhook.name, err)) diff --git a/test/e2e/suites/apivalidations/openstackserver_test.go b/test/e2e/suites/apivalidations/openstackserver_test.go new file mode 100644 index 0000000000..c4527624da --- /dev/null +++ b/test/e2e/suites/apivalidations/openstackserver_test.go @@ -0,0 +1,317 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apivalidations + +import ( + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1" +) + +var _ = Describe("OpenStackServer API validations", func() { + var namespace *corev1.Namespace + + defaultServer := func() *infrav1alpha1.OpenStackServer { + // Initialise a basic server object in the correct namespace + server := &infrav1alpha1.OpenStackServer{ + Spec: infrav1alpha1.OpenStackServerSpec{ + Flavor: "test-flavor", + IdentityRef: infrav1.OpenStackIdentityReference{ + Name: "test-identity", + CloudName: "test-cloud", + }, + Image: infrav1.ImageParam{Filter: &infrav1.ImageFilter{Name: ptr.To("test-image")}}, + Ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkParam{ + Filter: &infrav1.NetworkFilter{ + Name: "test-network", + }, + }, + }, + }, + }, + } + server.Namespace = namespace.Name + server.GenerateName = "openstackserver-" + return server + } + + BeforeEach(func() { + namespace = createNamespace() + }) + + It("should allow to create a server with correct spec", func() { + server := defaultServer() + + By("Creating the smallest permissible server spec") + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "OpenStackServer creation should succeed") + }) + + It("should not allow the identityRef to be set several times", func() { + server := defaultServer() + + By("Creating a bare server") + Expect(k8sClient.Create(ctx, 
server)).To(Succeed(), "OpenStackserver creation should succeed") + + By("Setting the identityRef") + server.Spec.IdentityRef = infrav1.OpenStackIdentityReference{Name: "foo", CloudName: "staging"} + Expect(k8sClient.Update(ctx, server)).NotTo(Succeed(), "OpenStackserver update should fail") + }) + + It("should not allow server metadata to exceed 255 characters", func() { + server := defaultServer() + + By("Creating a server with a metadata key that is too long") + server.Spec.ServerMetadata = []infrav1.ServerMetadata{ + { + Key: strings.Repeat("a", 256), + Value: "value", + }, + } + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a long metadata key should fail") + + By("Creating a server with a metadata value that is too long") + server.Spec.ServerMetadata = []infrav1.ServerMetadata{ + { + Key: "key", + Value: strings.Repeat("a", 256), + }, + } + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a long metadata value should fail") + + By("Creating a server with a metadata key and value of 255 characters should succeed") + server.Spec.ServerMetadata = []infrav1.ServerMetadata{ + { + Key: strings.Repeat("a", 255), + Value: strings.Repeat("b", 255), + }, + } + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "Creating a server with max metadata key and value should succeed") + }) + + Context("volumes", func() { + It("should not allow volume with zero size", func() { + server := defaultServer() + server.Spec.RootVolume = &infrav1.RootVolume{ + SizeGiB: 0, + } + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a zero size root volume should fail") + + server = defaultServer() + server.Spec.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "test-volume", + SizeGiB: 0, + }, + } + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a zero size additional block device should fail") + }) + + It("should allow to create server with 
spec.RootVolume and non-root device name in spec.AdditionalBlockDevices", func() { + server := defaultServer() + server.Spec.RootVolume = &infrav1.RootVolume{SizeGiB: 50, BlockDeviceVolume: infrav1.BlockDeviceVolume{}} + server.Spec.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + {Name: "user", SizeGiB: 30, Storage: infrav1.BlockDeviceStorage{}}, + } + + By("Creating a server with spec.RootVolume and non-root device name in spec.AdditionalBlockDevices") + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "OpenStackserver creation with non-root device name in spec.AdditionalBlockDevices should succeed") + }) + + It("should not allow to create server with spec.RootVolume and root device name in spec.AdditionalBlockDevices", func() { + server := defaultServer() + server.Spec.RootVolume = &infrav1.RootVolume{SizeGiB: 50, BlockDeviceVolume: infrav1.BlockDeviceVolume{}} + server.Spec.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + {Name: "root", SizeGiB: 30, Storage: infrav1.BlockDeviceStorage{}}, + } + + By("Creating a server with spec.RootVolume and root device name in spec.AdditionalBlockDevices") + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "OpenStackserver creation with root device name in spec.AdditionalBlockDevices should not succeed") + }) + + /* FIXME: These tests are failing + It("should not allow additional volume with empty name", func() { + server.Spec.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "", + SizeGiB: 1, + }, + } + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with an empty name additional block device should fail") + }) + + It("should not allow additional volume with name root", func() { + server.Spec.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "root", + SizeGiB: 1, + }, + } + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a root named additional block device should fail") + }) + */ + + It("should not 
allow additional volume with duplicate name", func() { + server := defaultServer() + server.Spec.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "test-volume", + SizeGiB: 1, + }, + { + Name: "test-volume", + SizeGiB: 2, + }, + } + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with duplicate named additional block device should fail") + }) + + defaultserverWithRootVolumeAZ := func(az *infrav1.VolumeAvailabilityZone) *infrav1alpha1.OpenStackServer { + server := defaultServer() + server.Spec.RootVolume = &infrav1.RootVolume{ + SizeGiB: 1, + } + server.Spec.RootVolume.AvailabilityZone = az + return server + } + + defaultserverWithAdditionBlockDeviceAZ := func(az *infrav1.VolumeAvailabilityZone) *infrav1alpha1.OpenStackServer { + server := defaultServer() + server.Spec.AdditionalBlockDevices = []infrav1.AdditionalBlockDevice{ + { + Name: "test-volume", + SizeGiB: 1, + Storage: infrav1.BlockDeviceStorage{ + Type: infrav1.VolumeBlockDevice, + Volume: &infrav1.BlockDeviceVolume{ + AvailabilityZone: az, + }, + }, + }, + } + return server + } + + It("should allow volume with defaulted AZ from", func() { + azName := infrav1.VolumeAZName("test-az") + az := infrav1.VolumeAvailabilityZone{ + Name: &azName, + } + + server := defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "Creating a server with a root volume with an availability zone should succeed") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "Creating a server with an additional block device with an availability zone should succeed") + }) + + It("should allow volume with AZ from Name", func() { + azName := infrav1.VolumeAZName("test-az") + az := infrav1.VolumeAvailabilityZone{ + From: infrav1.VolumeAZFromName, + Name: &azName, + } + + server := defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "Creating a server with a root volume 
with an availability zone should succeed") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "Creating a server with an additional block device with an availability zone should succeed") + }) + + It("should allow volume AZ from server", func() { + az := infrav1.VolumeAvailabilityZone{ + From: infrav1.VolumeAZFromMachine, + } + + server := defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "Creating a server with a root volume with an availability zone should succeed") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).To(Succeed(), "Creating a server with an additional block device with an availability zone should succeed") + }) + + It("should not allow volume AZ with invalid from", func() { + az := infrav1.VolumeAvailabilityZone{ + From: "invalid", + } + + server := defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a root volume with an invalid availability zone should fail") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with an additional block device with an invalid availability zone should fail") + }) + + It("should not allow empty volume AZ", func() { + az := infrav1.VolumeAvailabilityZone{} + + server := defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a root volume with an empty availability zone should fail") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with an additional block device with an empty availability zone should fail") + }) + + It("should not allow volume AZ from Name with missing name", func() { + az := infrav1.VolumeAvailabilityZone{ + From: infrav1.VolumeAZFromName, + } + + server := 
defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a root volume with a missing name availability zone should fail") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with an additional block device with a missing name availability zone should fail") + }) + + It("should not allow volume AZ from server with name", func() { + azName := infrav1.VolumeAZName("test-az") + az := infrav1.VolumeAvailabilityZone{ + From: infrav1.VolumeAZFromMachine, + Name: &azName, + } + + server := defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a root volume with a name availability zone should fail") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with an additional block device with a name availability zone should fail") + }) + + It("should not allow volume AZ from Name with empty name", func() { + azName := infrav1.VolumeAZName("") + az := infrav1.VolumeAvailabilityZone{ + From: infrav1.VolumeAZFromName, + Name: &azName, + } + + server := defaultserverWithRootVolumeAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with a root volume with an empty name availability zone should fail") + + server = defaultserverWithAdditionBlockDeviceAZ(&az) + Expect(k8sClient.Create(ctx, server)).NotTo(Succeed(), "Creating a server with an additional block device with an empty name availability zone should fail") + }) + }) +}) diff --git a/test/e2e/suites/e2e/e2e_test.go b/test/e2e/suites/e2e/e2e_test.go index b85b5466c6..25b0719e7c 100644 --- a/test/e2e/suites/e2e/e2e_test.go +++ b/test/e2e/suites/e2e/e2e_test.go @@ -166,6 +166,7 @@ var _ = Describe("e2e tests [PR-Blocking]", func() { openStackCluster, err = shared.ClusterForSpec(ctx, e2eCtx, namespace) 
Expect(err).NotTo(HaveOccurred()) bastionSpec := openStackCluster.Spec.Bastion + bastionFlavor := openStackCluster.Spec.Bastion.Spec.Flavor Expect(openStackCluster.Status.Bastion).NotTo(BeNil(), "OpenStackCluster.Status.Bastion has not been populated") bastionServerName := openStackCluster.Status.Bastion.Name bastionServer, err := shared.DumpOpenStackServers(e2eCtx, servers.ListOpts{Name: bastionServerName}) @@ -198,9 +199,16 @@ var _ = Describe("e2e tests [PR-Blocking]", func() { return false, errors.New("Bastion was not removed in OpenStackCluster.Status") }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-bastion")..., ).Should(BeTrue()) - securityGroupsList, err = shared.DumpOpenStackSecurityGroups(e2eCtx, groups.ListOpts{Tags: clusterName}) - Expect(err).NotTo(HaveOccurred()) - Expect(securityGroupsList).To(HaveLen(2)) + Eventually( + func() (bool, error) { + securityGroupsList, err = shared.DumpOpenStackSecurityGroups(e2eCtx, groups.ListOpts{Tags: clusterName}) + Expect(err).NotTo(HaveOccurred()) + if len(securityGroupsList) == 2 { + return true, nil + } + return false, errors.New("Security group for bastion was not removed in OpenStack") + }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-bastion")..., + ).Should(BeTrue()) shared.Logf("Delete the bastion") openStackCluster, err = shared.ClusterForSpec(ctx, e2eCtx, namespace) @@ -244,7 +252,52 @@ var _ = Describe("e2e tests [PR-Blocking]", func() { openStackCluster, err = shared.ClusterForSpec(ctx, e2eCtx, namespace) Expect(err).NotTo(HaveOccurred()) Expect(openStackCluster.Spec.Bastion).To(Equal(openStackClusterWithNewBastionFlavor.Spec.Bastion)) - Expect(openStackCluster.Status.Bastion).NotTo(BeNil(), "OpenStackCluster.Status.Bastion with new flavor has not been populated") + Eventually( + func() (bool, error) { + openStackCluster, err = shared.ClusterForSpec(ctx, e2eCtx, namespace) + Expect(err).NotTo(HaveOccurred()) + if openStackCluster.Status.Bastion != nil { + return true, nil + } + return false, 
errors.New("Bastion status is nil in OpenStackCluster.Status") + }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-bastion")..., + ).Should(BeTrue()) + securityGroupsList, err = shared.DumpOpenStackSecurityGroups(e2eCtx, groups.ListOpts{Tags: clusterName}) + Expect(err).NotTo(HaveOccurred()) + Expect(securityGroupsList).To(HaveLen(3)) + + shared.Logf("Change the bastion spec with the original flavor") + bastionOriginalFlavor, err := shared.GetFlavorFromName(e2eCtx, bastionFlavor) + Expect(err).NotTo(HaveOccurred()) + openStackCluster, err = shared.ClusterForSpec(ctx, e2eCtx, namespace) + Expect(err).NotTo(HaveOccurred()) + openStackClusterWithOriginalBastionFlavor := openStackCluster.DeepCopy() + openStackClusterWithOriginalBastionFlavor.Spec.Bastion = bastionSpec + openStackClusterWithOriginalBastionFlavor.Spec.Bastion.Spec.Flavor = bastionFlavor + Expect(e2eCtx.Environment.BootstrapClusterProxy.GetClient().Update(ctx, openStackClusterWithOriginalBastionFlavor)).To(Succeed()) + Eventually( + func() (bool, error) { + bastionServer, err := shared.DumpOpenStackServers(e2eCtx, servers.ListOpts{Name: bastionServerName, Flavor: bastionOriginalFlavor.ID}) + Expect(err).NotTo(HaveOccurred()) + if len(bastionServer) == 1 { + return true, nil + } + return false, errors.New("Bastion with original flavor was not created in OpenStack") + }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-bastion")..., + ).Should(BeTrue()) + openStackCluster, err = shared.ClusterForSpec(ctx, e2eCtx, namespace) + Expect(err).NotTo(HaveOccurred()) + Expect(openStackCluster.Spec.Bastion).To(Equal(openStackClusterWithOriginalBastionFlavor.Spec.Bastion)) + Eventually( + func() (bool, error) { + openStackCluster, err = shared.ClusterForSpec(ctx, e2eCtx, namespace) + Expect(err).NotTo(HaveOccurred()) + if openStackCluster.Status.Bastion != nil { + return true, nil + } + return false, errors.New("Bastion status is nil in OpenStackCluster.Status") + }, e2eCtx.E2EConfig.GetIntervals(specName, 
"wait-bastion")..., + ).Should(BeTrue()) securityGroupsList, err = shared.DumpOpenStackSecurityGroups(e2eCtx, groups.ListOpts{Tags: clusterName}) Expect(err).NotTo(HaveOccurred()) Expect(securityGroupsList).To(HaveLen(3))