diff --git a/Makefile b/Makefile index c1968fe83a..e25fb5badb 100644 --- a/Makefile +++ b/Makefile @@ -257,6 +257,7 @@ generate-go: $(MOCKGEN) paths=./api/... \ object:headerFile=./hack/boilerplate/boilerplate.generatego.txt $(CONVERSION_GEN) \ + --input-dirs=./api/v1alpha1 \ --input-dirs=./api/v1alpha5 \ --input-dirs=./api/v1alpha6 \ --input-dirs=./api/v1alpha7 \ diff --git a/PROJECT b/PROJECT index 7874bda38c..6514dde9b3 100644 --- a/PROJECT +++ b/PROJECT @@ -1,40 +1,43 @@ -version: "2" +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html domain: cluster.x-k8s.io repo: sigs.k8s.io/cluster-api-provider-openstack resources: - group: infrastructure - version: v1alpha5 kind: OpenStackCluster -- group: infrastructure version: v1alpha5 - kind: OpenStackMachine - group: infrastructure + kind: OpenStackMachine version: v1alpha5 +- group: infrastructure kind: OpenStackMachineTemplate + version: v1alpha5 - group: infrastructure kind: OpenStackClusterTemplate version: v1alpha5 - group: infrastructure - version: v1alpha6 kind: OpenStackCluster -- group: infrastructure version: v1alpha6 - kind: OpenStackMachine - group: infrastructure + kind: OpenStackMachine version: v1alpha6 +- group: infrastructure kind: OpenStackMachineTemplate + version: v1alpha6 - group: infrastructure kind: OpenStackClusterTemplate version: v1alpha6 - group: infrastructure - version: v1alpha7 kind: OpenStackCluster -- group: infrastructure version: v1alpha7 - kind: OpenStackMachine - group: infrastructure + kind: OpenStackMachine version: v1alpha7 +- group: infrastructure kind: OpenStackMachineTemplate + version: v1alpha7 - group: infrastructure kind: OpenStackClusterTemplate version: v1alpha7 @@ -50,3 +53,7 @@ resources: - group: infrastructure kind: OpenStackClusterTemplate version: v1alpha8 +- group: infrastructure + kind: OpenStackFloatingIPPool + version: v1alpha1 +version: "2" diff --git a/api/v1alpha1/conditions_consts.go b/api/v1alpha1/conditions_consts.go new file mode 100644 index 0000000000..32d80d51d7 --- /dev/null +++ b/api/v1alpha1/conditions_consts.go @@ -0,0 +1,22 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + // OpenstackFloatingIPPoolReadyCondition reports on the current status of the floating ip pool. Ready indicates that the pool is ready to be used. + OpenstackFloatingIPPoolReadyCondition = "OpenstackFloatingIPPoolReadyCondition" +) diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go new file mode 100644 index 0000000000..7fff087c61 --- /dev/null +++ b/api/v1alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000000..fee80edbaa --- /dev/null +++ b/api/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package v1alpha1 contains API Schema definitions for the infrastructure v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=infrastructure.cluster.x-k8s.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha1/openstackfloatingippool_types.go b/api/v1alpha1/openstackfloatingippool_types.go new file mode 100644 index 0000000000..0f6c4e129f --- /dev/null +++ b/api/v1alpha1/openstackfloatingippool_types.go @@ -0,0 +1,136 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + // We use v1alpha7 here rather than anything newer because as of writing + // it is the newest API version we should no longer be making breaking + // changes to. If we bump this we need to look carefully for resulting + // CRD changes in v1alpha1 to ensure they are compatible. + infrav1alpha7 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" +) + +const ( + // OpenStackFloatingIPPoolFinalizer allows ReconcileOpenStackFloatingIPPool to clean up resources associated with OpenStackFloatingIPPool before + // removing it from the apiserver. 
+ OpenStackFloatingIPPoolFinalizer = "openstackfloatingippool.infrastructure.cluster.x-k8s.io" + + OpenStackFloatingIPPoolNameIndex = "spec.poolRef.name" + + // OpenStackFloatingIPPoolIP. + DeleteFloatingIPFinalizer = "openstackfloatingippool.infrastructure.cluster.x-k8s.io/delete-floating-ip" +) + +// ReclaimPolicy is a string type alias to represent reclaim policies for floating ips. +type ReclaimPolicy string + +const ( + // ReclaimDelete is the reclaim policy for floating ips. + ReclaimDelete ReclaimPolicy = "Delete" + // ReclaimRetain is the reclaim policy for floating ips. + ReclaimRetain ReclaimPolicy = "Retain" +) + +// OpenStackFloatingIPPoolSpec defines the desired state of OpenStackFloatingIPPool. +type OpenStackFloatingIPPoolSpec struct { + // PreAllocatedFloatingIPs is a list of floating IPs precreated in OpenStack that should be used by this pool. + // These are used before allocating new ones and are not deleted from OpenStack when the pool is deleted. + PreAllocatedFloatingIPs []string `json:"preAllocatedFloatingIPs,omitempty"` + + // IdentityRef is a reference to a identity to be used when reconciling this pool. + // +optional + IdentityRef *infrav1alpha7.OpenStackIdentityReference `json:"identityRef,omitempty"` + + // FloatingIPNetwork is the external network to use for floating ips, if there's only one external network it will be used by default + // +optional + FloatingIPNetwork infrav1alpha7.NetworkFilter `json:"floatingIPNetwork"` + + // The name of the cloud to use from the clouds secret + // +optional + CloudName string `json:"cloudName"` + + // The stratergy to use for reclaiming floating ips when they are released from a machine + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=Retain;Delete + ReclaimPolicy ReclaimPolicy `json:"reclaimPolicy"` +} + +// OpenStackFloatingIPPoolStatus defines the observed state of OpenStackFloatingIPPool. +type OpenStackFloatingIPPoolStatus struct { + // +kubebuilder:default={} + // +optional + ClaimedIPs []string `json:"claimedIPs"` + + // +kubebuilder:default={} + // +optional + AvailableIPs []string `json:"availableIPs"` + + // FailedIPs contains a list of floating ips that failed to be allocated + // +optional + FailedIPs []string `json:"failedIPs,omitempty"` + + // floatingIPNetwork contains information about the network used for floating ips + // +optional + FloatingIPNetwork *infrav1alpha7.NetworkStatus `json:"floatingIPNetwork,omitempty"` + + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +//+kubebuilder:object:root=true +// +kubebuilder:storageversion +//+kubebuilder:subresource:status + +// OpenStackFloatingIPPool is the Schema for the openstackfloatingippools API. +type OpenStackFloatingIPPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OpenStackFloatingIPPoolSpec `json:"spec,omitempty"` + Status OpenStackFloatingIPPoolStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// OpenStackFloatingIPPoolList contains a list of OpenStackFloatingIPPool. +type OpenStackFloatingIPPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OpenStackFloatingIPPool `json:"items"` +} + +// GetConditions returns the observations of the operational state of the OpenStackFloatingIPPool resource. 
+func (r *OpenStackFloatingIPPool) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the OpenStackFloatingIPPool to the predescribed clusterv1.Conditions. +func (r *OpenStackFloatingIPPool) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} + +func (r *OpenStackFloatingIPPool) GetFloatingIPTag() string { + return fmt.Sprintf("cluster-api-provider-openstack-fip-pool-%s", r.Name) +} + +func init() { + SchemeBuilder.Register(&OpenStackFloatingIPPool{}, &OpenStackFloatingIPPoolList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0eef1f41d3 --- /dev/null +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,154 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" + "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackFloatingIPPool) DeepCopyInto(out *OpenStackFloatingIPPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackFloatingIPPool. +func (in *OpenStackFloatingIPPool) DeepCopy() *OpenStackFloatingIPPool { + if in == nil { + return nil + } + out := new(OpenStackFloatingIPPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackFloatingIPPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackFloatingIPPoolList) DeepCopyInto(out *OpenStackFloatingIPPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackFloatingIPPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackFloatingIPPoolList. +func (in *OpenStackFloatingIPPoolList) DeepCopy() *OpenStackFloatingIPPoolList { + if in == nil { + return nil + } + out := new(OpenStackFloatingIPPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OpenStackFloatingIPPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackFloatingIPPoolSpec) DeepCopyInto(out *OpenStackFloatingIPPoolSpec) { + *out = *in + if in.PreAllocatedFloatingIPs != nil { + in, out := &in.PreAllocatedFloatingIPs, &out.PreAllocatedFloatingIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IdentityRef != nil { + in, out := &in.IdentityRef, &out.IdentityRef + *out = new(v1alpha7.OpenStackIdentityReference) + **out = **in + } + out.FloatingIPNetwork = in.FloatingIPNetwork +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackFloatingIPPoolSpec. +func (in *OpenStackFloatingIPPoolSpec) DeepCopy() *OpenStackFloatingIPPoolSpec { + if in == nil { + return nil + } + out := new(OpenStackFloatingIPPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackFloatingIPPoolStatus) DeepCopyInto(out *OpenStackFloatingIPPoolStatus) { + *out = *in + if in.ClaimedIPs != nil { + in, out := &in.ClaimedIPs, &out.ClaimedIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AvailableIPs != nil { + in, out := &in.AvailableIPs, &out.AvailableIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FailedIPs != nil { + in, out := &in.FailedIPs, &out.FailedIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FloatingIPNetwork != nil { + in, out := &in.FloatingIPNetwork, &out.FloatingIPNetwork + *out = new(v1alpha7.NetworkStatus) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackFloatingIPPoolStatus. +func (in *OpenStackFloatingIPPoolStatus) DeepCopy() *OpenStackFloatingIPPoolStatus { + if in == nil { + return nil + } + out := new(OpenStackFloatingIPPoolStatus) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackfloatingippools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackfloatingippools.yaml new file mode 100644 index 0000000000..a38c86c0cc --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackfloatingippools.yaml @@ -0,0 +1,191 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: openstackfloatingippools.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + kind: OpenStackFloatingIPPool + listKind: OpenStackFloatingIPPoolList + plural: openstackfloatingippools + singular: openstackfloatingippool + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OpenStackFloatingIPPool is the Schema for the openstackfloatingippools + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpenStackFloatingIPPoolSpec defines the desired state of + OpenStackFloatingIPPool. + properties: + cloudName: + description: The name of the cloud to use from the clouds secret + type: string + floatingIPNetwork: + description: FloatingIPNetwork is the external network to use for + floating ips, if there's only one external network it will be used + by default + properties: + description: + type: string + id: + type: string + name: + type: string + notTags: + type: string + notTagsAny: + type: string + projectId: + type: string + tags: + type: string + tagsAny: + type: string + type: object + identityRef: + description: IdentityRef is a reference to a identity to be used when + reconciling this pool. + properties: + kind: + description: |- + Kind of the identity. Must be supported by the infrastructure + provider and may be either cluster or namespace-scoped. + minLength: 1 + type: string + name: + description: |- + Name of the infrastructure identity to be used. + Must be either a cluster-scoped resource, or namespaced-scoped + resource the same namespace as the resource(s) being provisioned. + type: string + required: + - kind + - name + type: object + preAllocatedFloatingIPs: + description: |- + PreAllocatedFloatingIPs is a list of floating IPs precreated in OpenStack that should be used by this pool. + These are used before allocating new ones and are not deleted from OpenStack when the pool is deleted. + items: + type: string + type: array + reclaimPolicy: + description: The stratergy to use for reclaiming floating ips when + they are released from a machine + enum: + - Retain + - Delete + type: string + type: object + status: + description: OpenStackFloatingIPPoolStatus defines the observed state + of OpenStackFloatingIPPool. + properties: + availableIPs: + default: [] + items: + type: string + type: array + claimedIPs: + default: [] + items: + type: string + type: array + conditions: + description: Conditions provide observations of the operational state + of a Cluster API resource. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + A human readable message indicating details about the transition. + This field may be empty. + type: string + reason: + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. + type: string + severity: + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. 
+ type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failedIPs: + description: FailedIPs contains a list of floating ips that failed + to be allocated + items: + type: string + type: array + floatingIPNetwork: + description: floatingIPNetwork contains information about the network + used for floating ips + properties: + id: + type: string + name: + type: string + tags: + items: + type: string + type: array + required: + - id + - name + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 283048b008..c153237cc4 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -9,6 +9,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml - bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml - bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml +- bases/infrastructure.cluster.x-k8s.io_openstackfloatingippools.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: @@ -18,6 +19,7 @@ patches: - path: patches/webhook_in_openstackmachines.yaml - path: patches/webhook_in_openstackmachinetemplates.yaml - path: patches/webhook_in_openstackclustertemplates.yaml +#- patches/webhook_in_openstackfloatingippools.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_openstackfloatingippools.yaml b/config/crd/patches/cainjection_in_openstackfloatingippools.yaml new file mode 100644 index 0000000000..edb01f4691 --- /dev/null +++ b/config/crd/patches/cainjection_in_openstackfloatingippools.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: openstackfloatingippools.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_openstackfloatingippools.yaml b/config/crd/patches/webhook_in_openstackfloatingippools.yaml new file mode 100644 index 0000000000..abe4d216dc --- /dev/null +++ b/config/crd/patches/webhook_in_openstackfloatingippools.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: openstackfloatingippools.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/openstackfloatingippool_editor_role.yaml b/config/rbac/openstackfloatingippool_editor_role.yaml new file mode 100644 index 0000000000..c126a23d4b --- /dev/null +++ b/config/rbac/openstackfloatingippool_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit openstackfloatingippools. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: openstackfloatingippool-editor-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackfloatingippools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackfloatingippools/status + verbs: + - get diff --git a/config/rbac/openstackfloatingippool_viewer_role.yaml b/config/rbac/openstackfloatingippool_viewer_role.yaml new file mode 100644 index 0000000000..81cf2f1dbf --- /dev/null +++ b/config/rbac/openstackfloatingippool_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view openstackfloatingippools. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: openstackfloatingippool-viewer-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackfloatingippools + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackfloatingippools/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 0eab520098..6a40632602 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -73,6 +73,26 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackfloatingippools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - openstackfloatingippools/status + verbs: + - get + - patch + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: @@ -93,3 +113,27 @@ rules: - get - patch - update +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddressclaims + - ipaddressclaims/status + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddresses + - ipaddresses/status + verbs: + - create + - delete + - get + - list + - update + - watch diff --git a/controllers/openstackfloatingippool_controller.go b/controllers/openstackfloatingippool_controller.go new file mode 100644 index 0000000000..6ee65140a3 --- /dev/null +++ b/controllers/openstackfloatingippool_controller.go @@ -0,0 +1,475 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" + infrav1alpha7 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha8" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" +) + +const ( + openStackFloatingIPPool = "OpenStackFloatingIPPool" +) + +var backoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, +} + +// OpenStackFloatingIPPoolReconciler reconciles a OpenStackFloatingIPPool object. +type OpenStackFloatingIPPoolReconciler struct { + Client client.Client + Recorder record.EventRecorder + WatchFilterValue string + ScopeFactory scope.Factory + CaCertificates []byte // PEM encoded ca certificates. 
+ + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackfloatingippools,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackfloatingippools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims;ipaddressclaims/status,verbs=get;list;watch;update;create;delete +// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses;ipaddresses/status,verbs=get;list;watch;create;update;delete + +func (r *OpenStackFloatingIPPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + pool := &infrav1alpha1.OpenStackFloatingIPPool{} + if err := r.Client.Get(ctx, req.NamespacedName, pool); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + scope, err := r.ScopeFactory.NewClientScopeFromFloatingIPPool(ctx, r.Client, pool, r.CaCertificates, log) + if err != nil { + return reconcile.Result{}, err + } + + // This is done before deleting the pool, because we want to handle deleted IPs before we delete the pool + if err := r.reconcileIPAddresses(ctx, scope, pool); err != nil { + return ctrl.Result{}, err + } + + if pool.ObjectMeta.DeletionTimestamp.IsZero() { + // Add finalizer if it does not exist + if controllerutil.AddFinalizer(pool, infrav1alpha1.OpenStackFloatingIPPoolFinalizer) { + return ctrl.Result{}, r.Client.Update(ctx, pool) + } + } else { + // Handle deletion + return ctrl.Result{}, r.reconcileDelete(ctx, scope, pool) + } + + patchHelper, err := patch.NewHelper(pool, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + defer func() { + if err := patchHelper.Patch(ctx, pool); err != nil { + if reterr == nil { + reterr = fmt.Errorf("error patching OpenStackFloatingIPPool %s/%s: %w", pool.Namespace, pool.Name, err) + } + } + }() + + if err := r.reconcileFloatingIPNetwork(scope, pool); err != nil { + return ctrl.Result{}, err + } + + claims := &ipamv1.IPAddressClaimList{} + if err := r.Client.List(context.Background(), claims, client.InNamespace(req.Namespace), client.MatchingFields{infrav1alpha1.OpenStackFloatingIPPoolNameIndex: pool.Name}); err != nil { + return ctrl.Result{}, err + } + + for _, claim := range claims.Items { + claim := claim + log := log.WithValues("claim", claim.Name) + if !claim.ObjectMeta.DeletionTimestamp.IsZero() { + continue + } + + if claim.Status.AddressRef.Name == "" { + ipAddress := &ipamv1.IPAddress{} + err := r.Client.Get(ctx, client.ObjectKey{Name: claim.Name, Namespace: claim.Namespace}, ipAddress) + if client.IgnoreNotFound(err) != nil { + return ctrl.Result{}, err + } + if apierrors.IsNotFound(err) { + ip, err := r.getIP(ctx, scope, pool) + if err != nil { + return ctrl.Result{}, err + } + + ipAddress = &ipamv1.IPAddress{ + ObjectMeta: ctrl.ObjectMeta{ + Name: claim.Name, + Namespace: claim.Namespace, + Finalizers: []string{ + infrav1alpha1.DeleteFloatingIPFinalizer, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: claim.APIVersion, + Kind: claim.Kind, + Name: claim.Name, + UID: claim.UID, + }, + }, + }, + Spec: ipamv1.IPAddressSpec{ + ClaimRef: corev1.LocalObjectReference{ + Name: claim.Name, + }, + PoolRef: corev1.TypedLocalObjectReference{ + APIGroup: pointer.String(infrav1alpha1.GroupVersion.Group), + Kind: pool.Kind, + Name: pool.Name, + }, + Address: ip, + Prefix: 32, + }, + } + + // Retry creating the IPAddress object + err = 
wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) { + if err := r.Client.Create(ctx, ipAddress); err != nil { + return false, err + } + return true, nil + }) + if err != nil { + // If we failed to create the IPAddress, there might be an IP leak in OpenStack if we also failed to tag the IP after creation + scope.Logger().Error(err, "Failed to create IPAddress", "ip", ip) + return ctrl.Result{}, err + } + } + claim.Status.AddressRef.Name = ipAddress.Name + if err = r.Client.Status().Update(ctx, &claim); err != nil { + log.Error(err, "Failed to update IPAddressClaim status", "claim", claim.Name, "ipaddress", ipAddress.Name) + return ctrl.Result{}, err + } + scope.Logger().Info("Claimed IP", "ip", ipAddress.Spec.Address) + } + } + return ctrl.Result{}, r.Client.Status().Update(ctx, pool) +} + +func (r *OpenStackFloatingIPPoolReconciler) reconcileDelete(ctx context.Context, scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) error { + log := ctrl.LoggerFrom(ctx) + ipAddresses := &ipamv1.IPAddressList{} + if err := r.Client.List(ctx, ipAddresses, client.InNamespace(pool.Namespace), client.MatchingFields{infrav1alpha1.OpenStackFloatingIPPoolNameIndex: pool.Name}); err != nil { + return err + } + + // If there are still IPAddress objects that are not deleted, there are still claims on this pool and we should not delete the + // pool because it is needed to clean up the addresses from openstack + if len(ipAddresses.Items) > 0 { + log.Info("Waiting for IPAddress to be deleted before deleting OpenStackFloatingIPPool") + return errors.New("waiting for IPAddress to be deleted, until we can delete the OpenStackFloatingIPPool") + } + + networkingService, err := networking.NewService(scope) + if err != nil { + return err + } + + for _, ip := range diff(pool.Status.AvailableIPs, pool.Spec.PreAllocatedFloatingIPs) { + if err := networkingService.DeleteFloatingIP(pool, ip); err != nil { + return fmt.Errorf("delete floating IP: %w", err) + } + // Remove the IP from the available IPs, so we don't try to delete it again if the reconcile loop runs again + pool.Status.AvailableIPs = diff(pool.Status.AvailableIPs, []string{ip}) + } + + if controllerutil.RemoveFinalizer(pool, infrav1alpha1.OpenStackFloatingIPPoolFinalizer) { + log.Info("Removing finalizer from OpenStackFloatingIPPool") + return r.Client.Update(ctx, pool) + } + return nil +} + +func union(a []string, b []string) []string { + m := make(map[string]struct{}) + for _, item := range a { + m[item] = struct{}{} + } + for _, item := range b { + m[item] = struct{}{} + } + result := make([]string, 0, len(m)) + for item := range m { + result = append(result, item) + } + return result +} + +func diff(a []string, b []string) []string { + m := make(map[string]struct{}) + for _, item := range a { + m[item] = struct{}{} + } + for _, item := range b { + delete(m, item) + } + result := make([]string, 0, len(m)) + for item := range m { + result = append(result, item) + } + return result +} + +func (r *OpenStackFloatingIPPoolReconciler) reconcileIPAddresses(ctx context.Context, scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) error { + ipAddresses := &ipamv1.IPAddressList{} + if err := r.Client.List(ctx, ipAddresses, client.InNamespace(pool.Namespace), client.MatchingFields{infrav1alpha1.OpenStackFloatingIPPoolNameIndex: pool.Name}); err != nil { + return err + } + + networkingService, err := networking.NewService(scope) + if err != nil { + return err + } + pool.Status.ClaimedIPs = []string{} + if 
pool.Status.AvailableIPs == nil {
+		pool.Status.AvailableIPs = []string{}
+	}
+
+	for i := 0; i < len(ipAddresses.Items); i++ {
+		ipAddress := &(ipAddresses.Items[i])
+		if ipAddress.ObjectMeta.DeletionTimestamp.IsZero() {
+			pool.Status.ClaimedIPs = append(pool.Status.ClaimedIPs, ipAddress.Spec.Address)
+			continue
+		}
+
+		if controllerutil.ContainsFinalizer(ipAddress, infrav1alpha1.DeleteFloatingIPFinalizer) {
+			if pool.Spec.ReclaimPolicy == infrav1alpha1.ReclaimDelete && !contains(pool.Spec.PreAllocatedFloatingIPs, ipAddress.Spec.Address) {
+				if err = networkingService.DeleteFloatingIP(pool, ipAddress.Spec.Address); err != nil {
+					return fmt.Errorf("delete floating IP %q: %w", ipAddress.Spec.Address, err)
+				}
+			} else {
+				pool.Status.AvailableIPs = append(pool.Status.AvailableIPs, ipAddress.Spec.Address)
+			}
+		}
+		controllerutil.RemoveFinalizer(ipAddress, infrav1alpha1.DeleteFloatingIPFinalizer)
+		if err := r.Client.Update(ctx, ipAddress); err != nil {
+			return err
+		}
+	}
+	unclaimedPreAllocatedIPs := diff(pool.Spec.PreAllocatedFloatingIPs, pool.Status.ClaimedIPs)
+	unclaimedIPs := union(pool.Status.AvailableIPs, unclaimedPreAllocatedIPs)
+	pool.Status.AvailableIPs = diff(unclaimedIPs, pool.Status.FailedIPs)
+	return nil
+}
+
+func (r *OpenStackFloatingIPPoolReconciler) getIP(ctx context.Context, scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) (string, error) {
+	// There's a potential leak of IPs here, if the reconcile loop fails after we claim an IP but before we create the IPAddress object.
+	var ip string
+
+	networkingService, err := networking.NewService(scope)
+	if err != nil {
+		scope.Logger().Error(err, "Failed to create networking service")
+		return "", err
+	}
+
+	// Get tagged floating IPs and add them to the available IPs if they are not present in either the available IPs or the claimed IPs.
+	// This is done to prevent leaking floating IPs if the floating IP was created but the IPAddress object was not.
+	if len(pool.Status.AvailableIPs) == 0 {
+		taggedIPs, err := networkingService.GetFloatingIPsByTag(pool.GetFloatingIPTag())
+		if err != nil {
+			scope.Logger().Error(err, "Failed to get floating IPs by tag", "pool", pool.Name)
+			return "", err
+		}
+		for _, taggedIP := range taggedIPs {
+			if contains(pool.Status.AvailableIPs, taggedIP.FloatingIP) || contains(pool.Status.ClaimedIPs, taggedIP.FloatingIP) {
+				continue
+			}
+			scope.Logger().Info("Tagged floating IP found that was not known to the pool, adding it to the pool", "ip", taggedIP.FloatingIP)
+			pool.Status.AvailableIPs = append(pool.Status.AvailableIPs, taggedIP.FloatingIP)
+		}
+	}
+
+	if len(pool.Status.AvailableIPs) > 0 {
+		ip = pool.Status.AvailableIPs[0]
+		pool.Status.AvailableIPs = pool.Status.AvailableIPs[1:]
+	}
+
+	if ip != "" {
+		fp, err := networkingService.GetFloatingIP(ip)
+		if err != nil {
+			return "", fmt.Errorf("get floating IP: %w", err)
+		}
+		if fp != nil {
+			return fp.FloatingIP, nil
+		}
+	}
+
+	fp, err := networkingService.CreateFloatingIPForPool(pool)
+	if err != nil {
+		scope.Logger().Error(err, "Failed to create floating IP", "pool", pool.Name)
+		conditions.MarkFalse(pool, infrav1alpha1.OpenstackFloatingIPPoolReadyCondition, infrav1.OpenStackErrorReason, clusterv1.ConditionSeverityError, "Failed to create floating IP: %v", err)
+		if ip != "" {
+			pool.Status.FailedIPs = append(pool.Status.FailedIPs, ip)
+		}
+		return "", err
+	}
+	defer func() {
+		tag := pool.GetFloatingIPTag()
+
+		err := wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx
context.Context) (bool, error) { + if err := networkingService.TagFloatingIP(fp.FloatingIP, tag); err != nil { + scope.Logger().Error(err, "Failed to tag floating IP, retrying", "ip", fp.FloatingIP, "tag", tag) + return false, err + } + return true, nil + }) + if err != nil { + scope.Logger().Error(err, "Failed to tag floating IP", "ip", fp.FloatingIP, "tag", tag) + } + }() + + conditions.MarkTrue(pool, infrav1alpha1.OpenstackFloatingIPPoolReadyCondition) + + ip = fp.FloatingIP + pool.Status.ClaimedIPs = append(pool.Status.ClaimedIPs, ip) + return ip, nil +} + +func (r *OpenStackFloatingIPPoolReconciler) reconcileFloatingIPNetwork(scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) error { + // If the pool already has a network, we don't need to do anything + if pool.Status.FloatingIPNetwork != nil { + return nil + } + + networkingService, err := networking.NewService(scope) + if err != nil { + return err + } + + netListOpts := external.ListOptsExt{ + ListOptsBuilder: pool.Spec.FloatingIPNetwork.ToListOpt(), + External: pointer.Bool(true), + } + + networkList, err := networkingService.GetNetworksByFilter(&netListOpts) + if err != nil { + return fmt.Errorf("failed to find network: %w", err) + } + if len(networkList) > 1 { + return fmt.Errorf("found multiple networks, expects filter to match one (result: %v)", networkList) + } + + pool.Status.FloatingIPNetwork = &infrav1alpha7.NetworkStatus{ + ID: networkList[0].ID, + Name: networkList[0].Name, + Tags: networkList[0].Tags, + } + return nil +} + +func (r *OpenStackFloatingIPPoolReconciler) ipAddressClaimToPoolMapper(_ context.Context, o client.Object) []ctrl.Request { + claim, ok := o.(*ipamv1.IPAddressClaim) + if !ok { + panic(fmt.Sprintf("Expected a IPAddressClaim but got a %T", o)) + } + if claim.Spec.PoolRef.Kind != openStackFloatingIPPool { + return nil + } + return []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Name: claim.Spec.PoolRef.Name, + Namespace: claim.Namespace, + }, + }, + } +} + +func (r *OpenStackFloatingIPPoolReconciler) ipAddressToPoolMapper(_ context.Context, o client.Object) []ctrl.Request { + ip, ok := o.(*ipamv1.IPAddress) + if !ok { + panic(fmt.Sprintf("Expected a IPAddress but got a %T", o)) + } + if ip.Spec.PoolRef.Kind != openStackFloatingIPPool { + return nil + } + return []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Name: ip.Spec.PoolRef.Name, + Namespace: ip.Namespace, + }, + }, + } +} + +func (r *OpenStackFloatingIPPoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField(ctx, &ipamv1.IPAddressClaim{}, infrav1alpha1.OpenStackFloatingIPPoolNameIndex, func(rawObj client.Object) []string { + claim := rawObj.(*ipamv1.IPAddressClaim) + if claim.Spec.PoolRef.Kind != openStackFloatingIPPool { + return nil + } + return []string{claim.Spec.PoolRef.Name} + }); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField(ctx, &ipamv1.IPAddress{}, infrav1alpha1.OpenStackFloatingIPPoolNameIndex, func(rawObj client.Object) []string { + ip := rawObj.(*ipamv1.IPAddress) + return []string{ip.Spec.PoolRef.Name} + }); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1alpha1.OpenStackFloatingIPPool{}). + Watches( + &ipamv1.IPAddressClaim{}, + handler.EnqueueRequestsFromMapFunc(r.ipAddressClaimToPoolMapper), + ). + Watches( + &ipamv1.IPAddress{}, + handler.EnqueueRequestsFromMapFunc(r.ipAddressToPoolMapper), + ). 
+ Complete(r) +} diff --git a/main.go b/main.go index 40ee63009c..01c6caa6a3 100644 --- a/main.go +++ b/main.go @@ -34,6 +34,7 @@ import ( _ "k8s.io/component-base/logs/json/register" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/util/flags" ctrl "sigs.k8s.io/controller-runtime" cache "sigs.k8s.io/controller-runtime/pkg/cache" @@ -42,6 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/webhook" + infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1alpha5 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha5" infrav1alpha6 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha6" infrav1alpha7 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" @@ -84,10 +86,12 @@ var ( func init() { _ = clientgoscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) + _ = ipamv1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = infrav1alpha5.AddToScheme(scheme) _ = infrav1alpha6.AddToScheme(scheme) _ = infrav1alpha7.AddToScheme(scheme) + _ = infrav1alpha1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme metrics.RegisterAPIPrometheusMetrics() @@ -254,7 +258,6 @@ func main() { setupChecks(mgr) setupReconcilers(ctx, mgr, caCerts, scopeFactory) setupWebhooks(mgr) - // +kubebuilder:scaffold:builder setupLog.Info("starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { @@ -296,6 +299,16 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, caCerts []byte, sco setupLog.Error(err, "unable to create controller", "controller", "OpenStackMachine") os.Exit(1) } + if err := (&controllers.OpenStackFloatingIPPoolReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("floatingippool-controller"), + ScopeFactory: scopeFactory, + Scheme: mgr.GetScheme(), + CaCertificates: caCerts, + }).SetupWithManager(ctx, mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "FloatingIPPool") + os.Exit(1) + } } func setupWebhooks(mgr ctrl.Manager) { diff --git a/pkg/cloud/services/networking/floatingip.go b/pkg/cloud/services/networking/floatingip.go index 61b7e77d94..8586ba6223 100644 --- a/pkg/cloud/services/networking/floatingip.go +++ b/pkg/cloud/services/networking/floatingip.go @@ -17,6 +17,7 @@ limitations under the License. 
package networking import ( + "fmt" "time" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" @@ -24,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha8" "sigs.k8s.io/cluster-api-provider-openstack/pkg/metrics" "sigs.k8s.io/cluster-api-provider-openstack/pkg/record" @@ -70,6 +72,49 @@ func (s *Service) GetOrCreateFloatingIP(eventObject runtime.Object, openStackClu return fp, nil } +func (s *Service) CreateFloatingIPForPool(pool *v1alpha1.OpenStackFloatingIPPool) (*floatingips.FloatingIP, error) { + var fpCreateOpts floatingips.CreateOpts + + fpCreateOpts.FloatingNetworkID = pool.Status.FloatingIPNetwork.ID + fpCreateOpts.Description = fmt.Sprintf("Created by cluster-api-provider-openstack OpenStackFloatingIPPool %s", pool.Name) + + fp, err := s.client.CreateFloatingIP(fpCreateOpts) + if err != nil { + record.Warnf(pool, "FailedCreateFloatingIP", "%s failed to create floating IP: %v", pool.Name, err) + return nil, err + } + + record.Eventf(pool, "SuccessfulCreateFloatingIP", "%s created floating IP %s with id %s", pool.Name, fp.FloatingIP, fp.ID) + return fp, nil +} + +func (s *Service) TagFloatingIP(ip string, tag string) error { + fip, err := s.GetFloatingIP(ip) + if err != nil { + return err + } + if fip == nil { + return nil + } + + mc := metrics.NewMetricPrometheusContext("floating_ip", "update") + _, err = s.client.ReplaceAllAttributesTags("floatingips", fip.ID, attributestags.ReplaceAllOpts{ + Tags: []string{tag}, + }) + if mc.ObserveRequest(err) != nil { + return err + } + return nil +} + +func (s *Service) GetFloatingIPsByTag(tag string) ([]floatingips.FloatingIP, error) { + fipList, err := s.client.ListFloatingIP(floatingips.ListOpts{Tags: tag}) + if err != nil { + return nil, err + } + return fipList, nil +} + func (s *Service) GetFloatingIP(ip string) (*floatingips.FloatingIP, error) { fpList, err := s.client.ListFloatingIP(floatingips.ListOpts{FloatingIP: ip}) if err != nil { diff --git a/pkg/scope/mock.go b/pkg/scope/mock.go index b4025c63d2..7dd2364b92 100644 --- a/pkg/scope/mock.go +++ b/pkg/scope/mock.go @@ -25,6 +25,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha8" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" @@ -80,6 +81,13 @@ func (f *MockScopeFactory) NewClientScopeFromCluster(_ context.Context, _ client return f, nil } +func (f *MockScopeFactory) NewClientScopeFromFloatingIPPool(_ context.Context, _ client.Client, _ *v1alpha1.OpenStackFloatingIPPool, _ []byte, _ logr.Logger) (Scope, error) { + if f.clientScopeCreateError != nil { + return nil, f.clientScopeCreateError + } + return f, nil +} + func (f *MockScopeFactory) NewComputeClient() (clients.ComputeClient, error) { return f.ComputeClient, nil } diff --git a/pkg/scope/provider.go b/pkg/scope/provider.go index 991b5f4805..e7a060d3f4 100644 --- a/pkg/scope/provider.go +++ b/pkg/scope/provider.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha8" 
"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients" "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/hash" @@ -104,6 +105,29 @@ func (f *providerScopeFactory) NewClientScopeFromCluster(ctx context.Context, ct return NewCachedProviderScope(f.clientCache, cloud, caCert, logger) } +func (f *providerScopeFactory) NewClientScopeFromFloatingIPPool(ctx context.Context, ctrlClient client.Client, openstackFloatingIPPool *v1alpha1.OpenStackFloatingIPPool, defaultCACert []byte, logger logr.Logger) (Scope, error) { + var cloud clientconfig.Cloud + var caCert []byte + + if openstackFloatingIPPool.Spec.IdentityRef != nil { + var err error + cloud, caCert, err = getCloudFromSecret(ctx, ctrlClient, openstackFloatingIPPool.Namespace, openstackFloatingIPPool.Spec.IdentityRef.Name, openstackFloatingIPPool.Spec.CloudName) + if err != nil { + return nil, err + } + } + + if caCert == nil { + caCert = defaultCACert + } + + if f.clientCache == nil { + return NewProviderScope(cloud, caCert, logger) + } + + return NewCachedProviderScope(f.clientCache, cloud, caCert, logger) +} + func getScopeCacheKey(cloud clientconfig.Cloud) (string, error) { key, err := hash.ComputeSpewHash(cloud) if err != nil { diff --git a/pkg/scope/scope.go b/pkg/scope/scope.go index d7458152fb..5c8f24b968 100644 --- a/pkg/scope/scope.go +++ b/pkg/scope/scope.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha8" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients" ) @@ -43,6 +44,7 @@ func NewFactory(maxCacheSize int) Factory { type Factory interface { NewClientScopeFromMachine(ctx context.Context, ctrlClient client.Client, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster, defaultCACert []byte, logger logr.Logger) (Scope, error) NewClientScopeFromCluster(ctx context.Context, ctrlClient client.Client, openStackCluster *infrav1.OpenStackCluster, defaultCACert []byte, logger logr.Logger) (Scope, error) + NewClientScopeFromFloatingIPPool(ctx context.Context, ctrlClient client.Client, openStackCluster *v1alpha1.OpenStackFloatingIPPool, defaultCACert []byte, logger logr.Logger) (Scope, error) } // Scope contains arguments common to most operations. diff --git a/pkg/utils/ipam/predicates.go b/pkg/utils/ipam/predicates.go new file mode 100644 index 0000000000..43f3d3fb10 --- /dev/null +++ b/pkg/utils/ipam/predicates.go @@ -0,0 +1,96 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ipam + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +func processIfClaimReferencesPoolKind(gk metav1.GroupKind, obj client.Object) bool { + var claim *ipamv1.IPAddressClaim + var ok bool + if claim, ok = obj.(*ipamv1.IPAddressClaim); !ok { + return false + } + + if claim.Spec.PoolRef.Kind != gk.Kind || claim.Spec.PoolRef.APIGroup == nil || *claim.Spec.PoolRef.APIGroup != gk.Group { + return false + } + + return true +} + +// ClaimReferencesPoolKind is a predicate that ensures an ipamv1.IPAddressClaim references a specified pool kind. +func ClaimReferencesPoolKind(gk metav1.GroupKind) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return processIfClaimReferencesPoolKind(gk, e.Object) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return processIfClaimReferencesPoolKind(gk, e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return processIfClaimReferencesPoolKind(gk, e.ObjectNew) + }, + GenericFunc: func(e event.GenericEvent) bool { + return processIfClaimReferencesPoolKind(gk, e.Object) + }, + } +} + +func processIfAddressReferencesPoolKind(gk metav1.GroupKind, obj client.Object) bool { + var addr *ipamv1.IPAddress + var ok bool + if addr, ok = obj.(*ipamv1.IPAddress); !ok { + return false + } + + if addr.Spec.PoolRef.Kind != gk.Kind || addr.Spec.PoolRef.APIGroup == nil || *addr.Spec.PoolRef.APIGroup != gk.Group { + return false + } + + return true +} + +// AddressReferencesPoolKind is a predicate that ensures an ipamv1.IPAddress references a specified pool kind. +func AddressReferencesPoolKind(gk metav1.GroupKind) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return processIfAddressReferencesPoolKind(gk, e.Object) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return processIfAddressReferencesPoolKind(gk, e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return processIfAddressReferencesPoolKind(gk, e.ObjectNew) + }, + GenericFunc: func(e event.GenericEvent) bool { + return processIfAddressReferencesPoolKind(gk, e.Object) + }, + } +} + +func HasFinalizerAndIsDeleting(finalizer string) predicate.Funcs { + return predicate.NewPredicateFuncs(func(obj client.Object) bool { + return !obj.GetDeletionTimestamp().IsZero() && controllerutil.ContainsFinalizer(obj, finalizer) + }) +}
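
For reference, a minimal sketch of how the new CRD and controller added above would be exercised. This is illustrative only and not part of the diff; the resource names, cloud name, secret name, network name, and IP below are placeholder values chosen for the example.

# An OpenStackFloatingIPPool backed by an external network, reusing one
# pre-created floating IP before allocating new ones.
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: OpenStackFloatingIPPool
metadata:
  name: external-fips            # placeholder name
  namespace: default
spec:
  # Entry to use from the clouds secret referenced below.
  cloudName: openstack
  identityRef:
    kind: Secret
    name: openstack-cloud-config # placeholder secret containing clouds.yaml
  # External network to allocate floating IPs from; the filter must match
  # exactly one external network.
  floatingIPNetwork:
    name: public
  # Optional pre-created floating IPs: handed out before new ones are
  # allocated and never deleted from OpenStack by the controller.
  preAllocatedFloatingIPs:
    - 192.0.2.10
  # Retain returns released IPs to the pool; Delete removes them from OpenStack.
  reclaimPolicy: Retain
---
# A consumer requests an IP by creating an IPAddressClaim whose poolRef points
# at the pool; the controller responds by binding an IPAddress to the claim.
apiVersion: ipam.cluster.x-k8s.io/v1beta1
kind: IPAddressClaim
metadata:
  name: example-claim            # placeholder name
  namespace: default
spec:
  poolRef:
    apiGroup: infrastructure.cluster.x-k8s.io
    kind: OpenStackFloatingIPPool
    name: external-fips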