diff --git a/.golangci.yml b/.golangci.yml index 062a5443fb63..682f2cca5090 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -70,6 +70,55 @@ linters-settings: # Controller Runtime - pkg: sigs.k8s.io/controller-runtime alias: ctrl + # CABPK + - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3 + alias: bootstrapv1alpha3 + - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4 + alias: bootstrapv1alpha4 + - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1 + alias: bootstrapv1 + # KCP + - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3 + alias: controlplanev1alpha3 + - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4 + alias: controlplanev1alpha4 + - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1 + alias: controlplanev1 + # CAPI + - pkg: sigs.k8s.io/cluster-api/api/v1alpha3 + alias: clusterv1alpha3 + - pkg: sigs.k8s.io/cluster-api/api/v1alpha4 + alias: clusterv1alpha4 + - pkg: sigs.k8s.io/cluster-api/api/v1beta1 + alias: clusterv1 + # CAPI exp + - pkg: sigs.k8s.io/cluster-api/exp/api/v1alpha3 + alias: expv1alpha3 + - pkg: sigs.k8s.io/cluster-api/exp/api/v1alpha4 + alias: expv1alpha4 + - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 + alias: expv1 + # CAPI exp addons + - pkg: sigs.k8s.io/cluster-api/exp/addons/api/v1alpha3 + alias: addonsv1alpha3 + - pkg: sigs.k8s.io/cluster-api/exp/addons/api/v1alpha4 + alias: addonsv1alpha4 + - pkg: sigs.k8s.io/cluster-api/exp/addons/api/v1beta1 + alias: addonsv1 + # CAPD + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3 + alias: infrav1alpha3 + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4 + alias: infrav1alpha4 + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1 + alias: infrav1 + # CAPD exp + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3 + alias: infraexpv1alpha3 + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha4 + alias: infraexpv1alpha4 + 
- pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1 + alias: infraexpv1 nolintlint: allow-unused: false allow-leading-space: false diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index 415f9455e734..d761d7adc942 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -20,13 +20,13 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.Cluster) + dst := dstRaw.(*clusterv1.Cluster) if err := Convert_v1alpha3_Cluster_To_v1beta1_Cluster(src, dst, nil); err != nil { return err @@ -36,11 +36,11 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { // will be "now". See https://github.com/kubernetes-sigs/cluster-api/issues/3798#issuecomment-708619826 for more // discussion. if src.Status.ControlPlaneInitialized { - conditions.MarkTrue(dst, v1beta1.ControlPlaneInitializedCondition) + conditions.MarkTrue(dst, clusterv1.ControlPlaneInitializedCondition) } // Manually restore data. 
- restored := &v1beta1.Cluster{} + restored := &clusterv1.Cluster{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -53,14 +53,14 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { } func (dst *Cluster) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.Cluster) + src := srcRaw.(*clusterv1.Cluster) if err := Convert_v1beta1_Cluster_To_v1alpha3_Cluster(src, dst, nil); err != nil { return err } // Set the v1alpha3 boolean status field if the v1alpha4 condition was true - if conditions.IsTrue(src, v1beta1.ControlPlaneInitializedCondition) { + if conditions.IsTrue(src, clusterv1.ControlPlaneInitializedCondition) { dst.Status.ControlPlaneInitialized = true } @@ -73,26 +73,26 @@ func (dst *Cluster) ConvertFrom(srcRaw conversion.Hub) error { } func (src *ClusterList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterList) + dst := dstRaw.(*clusterv1.ClusterList) return Convert_v1alpha3_ClusterList_To_v1beta1_ClusterList(src, dst, nil) } func (dst *ClusterList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterList) + src := srcRaw.(*clusterv1.ClusterList) return Convert_v1beta1_ClusterList_To_v1alpha3_ClusterList(src, dst, nil) } func (src *Machine) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.Machine) + dst := dstRaw.(*clusterv1.Machine) if err := Convert_v1alpha3_Machine_To_v1beta1_Machine(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.Machine{} + restored := &clusterv1.Machine{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -102,7 +102,7 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error { } func (dst *Machine) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.Machine) + src := srcRaw.(*clusterv1.Machine) if err := Convert_v1beta1_Machine_To_v1alpha3_Machine(src, dst, nil); err != nil { return err @@ -117,25 +117,25 @@ func (dst *Machine) ConvertFrom(srcRaw conversion.Hub) error { } func (src *MachineList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineList) + dst := dstRaw.(*clusterv1.MachineList) return Convert_v1alpha3_MachineList_To_v1beta1_MachineList(src, dst, nil) } func (dst *MachineList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineList) + src := srcRaw.(*clusterv1.MachineList) return Convert_v1beta1_MachineList_To_v1alpha3_MachineList(src, dst, nil) } func (src *MachineSet) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineSet) + dst := dstRaw.(*clusterv1.MachineSet) if err := Convert_v1alpha3_MachineSet_To_v1beta1_MachineSet(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.MachineSet{} + restored := &clusterv1.MachineSet{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -144,7 +144,7 @@ func (src *MachineSet) ConvertTo(dstRaw conversion.Hub) error { } func (dst *MachineSet) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineSet) + src := srcRaw.(*clusterv1.MachineSet) if err := Convert_v1beta1_MachineSet_To_v1alpha3_MachineSet(src, dst, nil); err != nil { return err @@ -158,36 +158,36 @@ func (dst *MachineSet) ConvertFrom(srcRaw conversion.Hub) error { } func (src *MachineSetList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineSetList) + dst := dstRaw.(*clusterv1.MachineSetList) return Convert_v1alpha3_MachineSetList_To_v1beta1_MachineSetList(src, dst, nil) } func (dst *MachineSetList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineSetList) + src := srcRaw.(*clusterv1.MachineSetList) return Convert_v1beta1_MachineSetList_To_v1alpha3_MachineSetList(src, dst, nil) } func (src *MachineDeployment) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineDeployment) + dst := dstRaw.(*clusterv1.MachineDeployment) if err := Convert_v1alpha3_MachineDeployment_To_v1beta1_MachineDeployment(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.MachineDeployment{} + restored := &clusterv1.MachineDeployment{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } if restored.Spec.Strategy != nil && restored.Spec.Strategy.RollingUpdate != nil { if dst.Spec.Strategy == nil { - dst.Spec.Strategy = &v1beta1.MachineDeploymentStrategy{} + dst.Spec.Strategy = &clusterv1.MachineDeploymentStrategy{} } if dst.Spec.Strategy.RollingUpdate == nil { - dst.Spec.Strategy.RollingUpdate = &v1beta1.MachineRollingUpdateDeployment{} + dst.Spec.Strategy.RollingUpdate = &clusterv1.MachineRollingUpdateDeployment{} } dst.Spec.Strategy.RollingUpdate.DeletePolicy = restored.Spec.Strategy.RollingUpdate.DeletePolicy } @@ -197,7 +197,7 @@ func (src *MachineDeployment) ConvertTo(dstRaw conversion.Hub) error { } func (dst *MachineDeployment) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineDeployment) + src := srcRaw.(*clusterv1.MachineDeployment) if err := Convert_v1beta1_MachineDeployment_To_v1alpha3_MachineDeployment(src, dst, nil); err != nil { return err @@ -212,26 +212,26 @@ func (dst *MachineDeployment) ConvertFrom(srcRaw conversion.Hub) error { } func (src *MachineDeploymentList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineDeploymentList) + dst := dstRaw.(*clusterv1.MachineDeploymentList) return Convert_v1alpha3_MachineDeploymentList_To_v1beta1_MachineDeploymentList(src, dst, nil) } func (dst *MachineDeploymentList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineDeploymentList) + src := srcRaw.(*clusterv1.MachineDeploymentList) return Convert_v1beta1_MachineDeploymentList_To_v1alpha3_MachineDeploymentList(src, dst, nil) } func (src *MachineHealthCheck) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineHealthCheck) + dst := dstRaw.(*clusterv1.MachineHealthCheck) if err := Convert_v1alpha3_MachineHealthCheck_To_v1beta1_MachineHealthCheck(src, dst, nil); err != nil 
{ return err } // Manually restore data. - restored := &v1beta1.MachineHealthCheck{} + restored := &clusterv1.MachineHealthCheck{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -244,7 +244,7 @@ func (src *MachineHealthCheck) ConvertTo(dstRaw conversion.Hub) error { } func (dst *MachineHealthCheck) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineHealthCheck) + src := srcRaw.(*clusterv1.MachineHealthCheck) if err := Convert_v1beta1_MachineHealthCheck_To_v1alpha3_MachineHealthCheck(src, dst, nil); err != nil { return err @@ -259,57 +259,57 @@ func (dst *MachineHealthCheck) ConvertFrom(srcRaw conversion.Hub) error { } func (src *MachineHealthCheckList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineHealthCheckList) + dst := dstRaw.(*clusterv1.MachineHealthCheckList) return Convert_v1alpha3_MachineHealthCheckList_To_v1beta1_MachineHealthCheckList(src, dst, nil) } func (dst *MachineHealthCheckList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineHealthCheckList) + src := srcRaw.(*clusterv1.MachineHealthCheckList) return Convert_v1beta1_MachineHealthCheckList_To_v1alpha3_MachineHealthCheckList(src, dst, nil) } -func Convert_v1beta1_MachineSetStatus_To_v1alpha3_MachineSetStatus(in *v1beta1.MachineSetStatus, out *MachineSetStatus, s apiconversion.Scope) error { +func Convert_v1beta1_MachineSetStatus_To_v1alpha3_MachineSetStatus(in *clusterv1.MachineSetStatus, out *MachineSetStatus, s apiconversion.Scope) error { // Status.Conditions was introduced in v1alpha4, thus requiring a custom conversion function; the values is going to be preserved in an annotation thus allowing roundtrip without loosing informations return autoConvert_v1beta1_MachineSetStatus_To_v1alpha3_MachineSetStatus(in, out, nil) } -func Convert_v1beta1_ClusterSpec_To_v1alpha3_ClusterSpec(in *v1beta1.ClusterSpec, out *ClusterSpec, s apiconversion.Scope) error { +func 
Convert_v1beta1_ClusterSpec_To_v1alpha3_ClusterSpec(in *clusterv1.ClusterSpec, out *ClusterSpec, s apiconversion.Scope) error { // NOTE: custom conversion func is required because spec.Topology does not exists in v1alpha3 return autoConvert_v1beta1_ClusterSpec_To_v1alpha3_ClusterSpec(in, out, s) } -func Convert_v1alpha3_Bootstrap_To_v1beta1_Bootstrap(in *Bootstrap, out *v1beta1.Bootstrap, s apiconversion.Scope) error { +func Convert_v1alpha3_Bootstrap_To_v1beta1_Bootstrap(in *Bootstrap, out *clusterv1.Bootstrap, s apiconversion.Scope) error { return autoConvert_v1alpha3_Bootstrap_To_v1beta1_Bootstrap(in, out, s) } -func Convert_v1beta1_MachineRollingUpdateDeployment_To_v1alpha3_MachineRollingUpdateDeployment(in *v1beta1.MachineRollingUpdateDeployment, out *MachineRollingUpdateDeployment, s apiconversion.Scope) error { +func Convert_v1beta1_MachineRollingUpdateDeployment_To_v1alpha3_MachineRollingUpdateDeployment(in *clusterv1.MachineRollingUpdateDeployment, out *MachineRollingUpdateDeployment, s apiconversion.Scope) error { return autoConvert_v1beta1_MachineRollingUpdateDeployment_To_v1alpha3_MachineRollingUpdateDeployment(in, out, s) } -func Convert_v1beta1_MachineHealthCheckSpec_To_v1alpha3_MachineHealthCheckSpec(in *v1beta1.MachineHealthCheckSpec, out *MachineHealthCheckSpec, s apiconversion.Scope) error { +func Convert_v1beta1_MachineHealthCheckSpec_To_v1alpha3_MachineHealthCheckSpec(in *clusterv1.MachineHealthCheckSpec, out *MachineHealthCheckSpec, s apiconversion.Scope) error { return autoConvert_v1beta1_MachineHealthCheckSpec_To_v1alpha3_MachineHealthCheckSpec(in, out, s) } -func Convert_v1alpha3_ClusterStatus_To_v1beta1_ClusterStatus(in *ClusterStatus, out *v1beta1.ClusterStatus, s apiconversion.Scope) error { +func Convert_v1alpha3_ClusterStatus_To_v1beta1_ClusterStatus(in *ClusterStatus, out *clusterv1.ClusterStatus, s apiconversion.Scope) error { return autoConvert_v1alpha3_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s) } -func 
Convert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *v1beta1.ObjectMeta, s apiconversion.Scope) error { +func Convert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1.ObjectMeta, s apiconversion.Scope) error { return autoConvert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in, out, s) } -func Convert_v1beta1_MachineStatus_To_v1alpha3_MachineStatus(in *v1beta1.MachineStatus, out *MachineStatus, s apiconversion.Scope) error { +func Convert_v1beta1_MachineStatus_To_v1alpha3_MachineStatus(in *clusterv1.MachineStatus, out *MachineStatus, s apiconversion.Scope) error { return autoConvert_v1beta1_MachineStatus_To_v1alpha3_MachineStatus(in, out, s) } -func Convert_v1beta1_MachineDeploymentStatus_To_v1alpha3_MachineDeploymentStatus(in *v1beta1.MachineDeploymentStatus, out *MachineDeploymentStatus, s apiconversion.Scope) error { +func Convert_v1beta1_MachineDeploymentStatus_To_v1alpha3_MachineDeploymentStatus(in *clusterv1.MachineDeploymentStatus, out *MachineDeploymentStatus, s apiconversion.Scope) error { // Status.Conditions was introduced in v1alpha4, thus requiring a custom conversion function; the values is going to be preserved in an annotation thus allowing roundtrip without loosing informations return autoConvert_v1beta1_MachineDeploymentStatus_To_v1alpha3_MachineDeploymentStatus(in, out, s) } -func Convert_v1alpha3_MachineStatus_To_v1beta1_MachineStatus(in *MachineStatus, out *v1beta1.MachineStatus, s apiconversion.Scope) error { +func Convert_v1alpha3_MachineStatus_To_v1beta1_MachineStatus(in *MachineStatus, out *clusterv1.MachineStatus, s apiconversion.Scope) error { // Status.version has been removed in v1beta1, thus requiring custom conversion function. the information will be dropped. 
return autoConvert_v1alpha3_MachineStatus_To_v1beta1_MachineStatus(in, out, s) } diff --git a/api/v1alpha3/conversion_test.go b/api/v1alpha3/conversion_test.go index 067514ceda12..f685a42566dd 100644 --- a/api/v1alpha3/conversion_test.go +++ b/api/v1alpha3/conversion_test.go @@ -25,38 +25,38 @@ import ( runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for Cluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.Cluster{}, + Hub: &clusterv1.Cluster{}, Spoke: &Cluster{}, SpokeAfterMutation: clusterSpokeAfterMutation, FuzzerFuncs: []fuzzer.FuzzerFuncs{ClusterJSONFuzzFuncs}, })) t.Run("for Machine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.Machine{}, + Hub: &clusterv1.Machine{}, Spoke: &Machine{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{BootstrapFuzzFuncs, MachineStatusFuzzFunc}, })) t.Run("for MachineSet", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.MachineSet{}, + Hub: &clusterv1.MachineSet{}, Spoke: &MachineSet{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{BootstrapFuzzFuncs, CustomObjectMetaFuzzFunc}, })) t.Run("for MachineDeployment", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.MachineDeployment{}, + Hub: &clusterv1.MachineDeployment{}, Spoke: &MachineDeployment{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{BootstrapFuzzFuncs, CustomObjectMetaFuzzFunc}, })) t.Run("for MachineHealthCheck", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.MachineHealthCheck{}, + Hub: &clusterv1.MachineHealthCheck{}, Spoke: &MachineHealthCheck{}, })) } @@ -118,7 +118,7 @@ func clusterSpokeAfterMutation(c conversion.Convertible) { condition := cluster.Status.Conditions[i] // Keep everything 
that is not ControlPlaneInitializedCondition - if condition.Type != ConditionType(v1beta1.ControlPlaneInitializedCondition) { + if condition.Type != ConditionType(clusterv1.ControlPlaneInitializedCondition) { tmp = append(tmp, condition) } } @@ -133,7 +133,7 @@ func ClusterJSONFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } -func ClusterVariableFuzzer(in *v1beta1.ClusterVariable, c fuzz.Continue) { +func ClusterVariableFuzzer(in *clusterv1.ClusterVariable, c fuzz.Continue) { c.FuzzNoCustom(in) // Not every random byte array is valid JSON, e.g. a string without `""`,so we're setting a valid value. diff --git a/api/v1alpha4/conversion.go b/api/v1alpha4/conversion.go index ef21c44975ca..c948e53cdaa6 100644 --- a/api/v1alpha4/conversion.go +++ b/api/v1alpha4/conversion.go @@ -20,19 +20,19 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.Cluster) + dst := dstRaw.(*clusterv1.Cluster) if err := Convert_v1alpha4_Cluster_To_v1beta1_Cluster(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.Cluster{} + restored := &clusterv1.Cluster{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -45,7 +45,7 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { } func (dst *Cluster) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.Cluster) + src := srcRaw.(*clusterv1.Cluster) if err := Convert_v1beta1_Cluster_To_v1alpha4_Cluster(src, dst, nil); err != nil { return err @@ -60,26 +60,26 @@ func (dst *Cluster) ConvertFrom(srcRaw conversion.Hub) error { } func (src *ClusterList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterList) + dst := dstRaw.(*clusterv1.ClusterList) return Convert_v1alpha4_ClusterList_To_v1beta1_ClusterList(src, dst, nil) } func (dst *ClusterList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterList) + src := srcRaw.(*clusterv1.ClusterList) return Convert_v1beta1_ClusterList_To_v1alpha4_ClusterList(src, dst, nil) } func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterClass) + dst := dstRaw.(*clusterv1.ClusterClass) if err := Convert_v1alpha4_ClusterClass_To_v1beta1_ClusterClass(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.ClusterClass{} + restored := &clusterv1.ClusterClass{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -91,7 +91,7 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error { } func (dst *ClusterClass) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterClass) + src := srcRaw.(*clusterv1.ClusterClass) if err := Convert_v1beta1_ClusterClass_To_v1alpha4_ClusterClass(src, dst, nil); err != nil { return err @@ -106,124 +106,124 @@ func (dst *ClusterClass) ConvertFrom(srcRaw conversion.Hub) error { } func (src *ClusterClassList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterClassList) + dst := dstRaw.(*clusterv1.ClusterClassList) return Convert_v1alpha4_ClusterClassList_To_v1beta1_ClusterClassList(src, dst, nil) } func (dst *ClusterClassList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterClassList) + src := srcRaw.(*clusterv1.ClusterClassList) return Convert_v1beta1_ClusterClassList_To_v1alpha4_ClusterClassList(src, dst, nil) } func (src *Machine) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.Machine) + dst := dstRaw.(*clusterv1.Machine) return Convert_v1alpha4_Machine_To_v1beta1_Machine(src, dst, nil) } func (dst *Machine) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.Machine) + src := srcRaw.(*clusterv1.Machine) return Convert_v1beta1_Machine_To_v1alpha4_Machine(src, dst, nil) } func (src *MachineList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineList) + dst := dstRaw.(*clusterv1.MachineList) return Convert_v1alpha4_MachineList_To_v1beta1_MachineList(src, dst, nil) } func (dst *MachineList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineList) + src := srcRaw.(*clusterv1.MachineList) return Convert_v1beta1_MachineList_To_v1alpha4_MachineList(src, dst, nil) } func (src *MachineSet) ConvertTo(dstRaw 
conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineSet) + dst := dstRaw.(*clusterv1.MachineSet) return Convert_v1alpha4_MachineSet_To_v1beta1_MachineSet(src, dst, nil) } func (dst *MachineSet) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineSet) + src := srcRaw.(*clusterv1.MachineSet) return Convert_v1beta1_MachineSet_To_v1alpha4_MachineSet(src, dst, nil) } func (src *MachineSetList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineSetList) + dst := dstRaw.(*clusterv1.MachineSetList) return Convert_v1alpha4_MachineSetList_To_v1beta1_MachineSetList(src, dst, nil) } func (dst *MachineSetList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineSetList) + src := srcRaw.(*clusterv1.MachineSetList) return Convert_v1beta1_MachineSetList_To_v1alpha4_MachineSetList(src, dst, nil) } func (src *MachineDeployment) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineDeployment) + dst := dstRaw.(*clusterv1.MachineDeployment) return Convert_v1alpha4_MachineDeployment_To_v1beta1_MachineDeployment(src, dst, nil) } func (dst *MachineDeployment) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineDeployment) + src := srcRaw.(*clusterv1.MachineDeployment) return Convert_v1beta1_MachineDeployment_To_v1alpha4_MachineDeployment(src, dst, nil) } func (src *MachineDeploymentList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineDeploymentList) + dst := dstRaw.(*clusterv1.MachineDeploymentList) return Convert_v1alpha4_MachineDeploymentList_To_v1beta1_MachineDeploymentList(src, dst, nil) } func (dst *MachineDeploymentList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineDeploymentList) + src := srcRaw.(*clusterv1.MachineDeploymentList) return Convert_v1beta1_MachineDeploymentList_To_v1alpha4_MachineDeploymentList(src, dst, nil) } func (src *MachineHealthCheck) ConvertTo(dstRaw conversion.Hub) error { - dst := 
dstRaw.(*v1beta1.MachineHealthCheck) + dst := dstRaw.(*clusterv1.MachineHealthCheck) return Convert_v1alpha4_MachineHealthCheck_To_v1beta1_MachineHealthCheck(src, dst, nil) } func (dst *MachineHealthCheck) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineHealthCheck) + src := srcRaw.(*clusterv1.MachineHealthCheck) return Convert_v1beta1_MachineHealthCheck_To_v1alpha4_MachineHealthCheck(src, dst, nil) } func (src *MachineHealthCheckList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachineHealthCheckList) + dst := dstRaw.(*clusterv1.MachineHealthCheckList) return Convert_v1alpha4_MachineHealthCheckList_To_v1beta1_MachineHealthCheckList(src, dst, nil) } func (dst *MachineHealthCheckList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachineHealthCheckList) + src := srcRaw.(*clusterv1.MachineHealthCheckList) return Convert_v1beta1_MachineHealthCheckList_To_v1alpha4_MachineHealthCheckList(src, dst, nil) } -func Convert_v1alpha4_MachineStatus_To_v1beta1_MachineStatus(in *MachineStatus, out *v1beta1.MachineStatus, s apiconversion.Scope) error { +func Convert_v1alpha4_MachineStatus_To_v1beta1_MachineStatus(in *MachineStatus, out *clusterv1.MachineStatus, s apiconversion.Scope) error { // Status.version has been removed in v1beta1, thus requiring custom conversion function. the information will be dropped. return autoConvert_v1alpha4_MachineStatus_To_v1beta1_MachineStatus(in, out, s) } -func Convert_v1beta1_ClusterClassSpec_To_v1alpha4_ClusterClassSpec(in *v1beta1.ClusterClassSpec, out *ClusterClassSpec, s apiconversion.Scope) error { +func Convert_v1beta1_ClusterClassSpec_To_v1alpha4_ClusterClassSpec(in *clusterv1.ClusterClassSpec, out *ClusterClassSpec, s apiconversion.Scope) error { // spec.{variables,patches} has been added with v1beta1. 
return autoConvert_v1beta1_ClusterClassSpec_To_v1alpha4_ClusterClassSpec(in, out, s) } -func Convert_v1beta1_Topology_To_v1alpha4_Topology(in *v1beta1.Topology, out *Topology, s apiconversion.Scope) error { +func Convert_v1beta1_Topology_To_v1alpha4_Topology(in *clusterv1.Topology, out *Topology, s apiconversion.Scope) error { // spec.topology.variables has been added with v1beta1. return autoConvert_v1beta1_Topology_To_v1alpha4_Topology(in, out, s) } diff --git a/api/v1alpha4/conversion_test.go b/api/v1alpha4/conversion_test.go index bda2c9470ff2..4831375d695e 100644 --- a/api/v1alpha4/conversion_test.go +++ b/api/v1alpha4/conversion_test.go @@ -26,40 +26,40 @@ import ( runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/utils/pointer" - "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for Cluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.Cluster{}, + Hub: &clusterv1.Cluster{}, Spoke: &Cluster{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{ClusterJSONFuzzFuncs}, })) t.Run("for ClusterClass", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.ClusterClass{}, + Hub: &clusterv1.ClusterClass{}, Spoke: &ClusterClass{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{ClusterClassJSONFuzzFuncs}, })) t.Run("for Machine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.Machine{}, + Hub: &clusterv1.Machine{}, Spoke: &Machine{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{MachineStatusFuzzFunc}, })) t.Run("for MachineSet", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.MachineSet{}, + Hub: &clusterv1.MachineSet{}, Spoke: &MachineSet{}, })) t.Run("for MachineDeployment", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.MachineDeployment{}, + Hub: &clusterv1.MachineDeployment{}, Spoke: 
&MachineDeployment{}, })) t.Run("for MachineHealthCheck", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.MachineHealthCheck{}, + Hub: &clusterv1.MachineHealthCheck{}, Spoke: &MachineHealthCheck{}, })) } @@ -84,7 +84,7 @@ func ClusterJSONFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } -func ClusterVariableFuzzer(in *v1beta1.ClusterVariable, c fuzz.Continue) { +func ClusterVariableFuzzer(in *clusterv1.ClusterVariable, c fuzz.Continue) { c.FuzzNoCustom(in) // Not every random byte array is valid JSON, e.g. a string without `""`,so we're setting a valid value. @@ -98,14 +98,14 @@ func ClusterClassJSONFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } -func JSONPatchFuzzer(in *v1beta1.JSONPatch, c fuzz.Continue) { +func JSONPatchFuzzer(in *clusterv1.JSONPatch, c fuzz.Continue) { c.FuzzNoCustom(in) // Not every random byte array is valid JSON, e.g. a string without `""`,so we're setting a valid value. in.Value = &apiextensionsv1.JSON{Raw: []byte("5")} } -func JSONSchemaPropsFuzzer(in *v1beta1.JSONSchemaProps, c fuzz.Continue) { +func JSONSchemaPropsFuzzer(in *clusterv1.JSONSchemaProps, c fuzz.Continue) { // NOTE: We have to fuzz the individual fields manually, // because we cannot call `FuzzNoCustom` as it would lead // to an infinite recursion. @@ -136,7 +136,7 @@ func JSONSchemaPropsFuzzer(in *v1beta1.JSONSchemaProps, c fuzz.Continue) { // We're using a copy of the current JSONSchemaProps, // because we cannot recursively fuzz new schemas. 
- in.Properties = map[string]v1beta1.JSONSchemaProps{} + in.Properties = map[string]clusterv1.JSONSchemaProps{} for i := 0; i < c.Intn(10); i++ { in.Properties[c.RandString()] = *in.DeepCopy() } diff --git a/bootstrap/kubeadm/api/v1alpha3/condition_consts.go b/bootstrap/kubeadm/api/v1alpha3/condition_consts.go index 7da7d983afe6..eea6a450132e 100644 --- a/bootstrap/kubeadm/api/v1alpha3/condition_consts.go +++ b/bootstrap/kubeadm/api/v1alpha3/condition_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1alpha3 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" // Conditions and condition Reasons for the KubeadmConfig object. @@ -26,7 +26,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. - DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition clusterv1alpha3.ConditionType = "DataSecretAvailable" // WaitingForClusterInfrastructureReason (Severity=Info) document a bootstrap secret generation process // waiting for the cluster infrastructure to be ready. @@ -55,7 +55,7 @@ const ( // machine, if the cluster is not using a control plane ref object, if the certificates are not provided // by the users. // IMPORTANT: This condition won't be re-created after clusterctl move. 
- CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1alpha3.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmConfig controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller diff --git a/bootstrap/kubeadm/api/v1alpha3/conversion.go b/bootstrap/kubeadm/api/v1alpha3/conversion.go index f0ce83ce2e43..35a84e2705f4 100644 --- a/bootstrap/kubeadm/api/v1alpha3/conversion.go +++ b/bootstrap/kubeadm/api/v1alpha3/conversion.go @@ -20,34 +20,34 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *KubeadmConfig) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfig) + dst := dstRaw.(*bootstrapv1.KubeadmConfig) if err := Convert_v1alpha3_KubeadmConfig_To_v1beta1_KubeadmConfig(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.KubeadmConfig{} + restored := &bootstrapv1.KubeadmConfig{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } if restored.Spec.JoinConfiguration != nil && restored.Spec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors != nil { if dst.Spec.JoinConfiguration == nil { - dst.Spec.JoinConfiguration = &v1beta1.JoinConfiguration{} + dst.Spec.JoinConfiguration = &bootstrapv1.JoinConfiguration{} } dst.Spec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = restored.Spec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors } if restored.Spec.InitConfiguration != nil && restored.Spec.InitConfiguration.NodeRegistration.IgnorePreflightErrors != nil { if dst.Spec.InitConfiguration == nil { - dst.Spec.InitConfiguration = &v1beta1.InitConfiguration{} + dst.Spec.InitConfiguration = &bootstrapv1.InitConfiguration{} } dst.Spec.InitConfiguration.NodeRegistration.IgnorePreflightErrors = restored.Spec.InitConfiguration.NodeRegistration.IgnorePreflightErrors } @@ -58,7 +58,7 @@ func (src *KubeadmConfig) ConvertTo(dstRaw conversion.Hub) error { } func (dst *KubeadmConfig) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfig) + src := srcRaw.(*bootstrapv1.KubeadmConfig) if err := Convert_v1beta1_KubeadmConfig_To_v1alpha3_KubeadmConfig(src, dst, nil); err != nil { return err @@ -73,40 +73,40 @@ func (dst *KubeadmConfig) ConvertFrom(srcRaw conversion.Hub) error { } func (src *KubeadmConfigList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfigList) + dst := dstRaw.(*bootstrapv1.KubeadmConfigList) return Convert_v1alpha3_KubeadmConfigList_To_v1beta1_KubeadmConfigList(src, dst, nil) } func (dst *KubeadmConfigList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfigList) + src := srcRaw.(*bootstrapv1.KubeadmConfigList) return Convert_v1beta1_KubeadmConfigList_To_v1alpha3_KubeadmConfigList(src, dst, nil) } func (src 
*KubeadmConfigTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfigTemplate) + dst := dstRaw.(*bootstrapv1.KubeadmConfigTemplate) if err := Convert_v1alpha3_KubeadmConfigTemplate_To_v1beta1_KubeadmConfigTemplate(src, dst, nil); err != nil { return err } // Manually restore data. - restored := &v1beta1.KubeadmConfigTemplate{} + restored := &bootstrapv1.KubeadmConfigTemplate{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } if restored.Spec.Template.Spec.JoinConfiguration != nil && restored.Spec.Template.Spec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors != nil { if dst.Spec.Template.Spec.JoinConfiguration == nil { - dst.Spec.Template.Spec.JoinConfiguration = &v1beta1.JoinConfiguration{} + dst.Spec.Template.Spec.JoinConfiguration = &bootstrapv1.JoinConfiguration{} } dst.Spec.Template.Spec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = restored.Spec.Template.Spec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors } if restored.Spec.Template.Spec.InitConfiguration != nil && restored.Spec.Template.Spec.InitConfiguration.NodeRegistration.IgnorePreflightErrors != nil { if dst.Spec.Template.Spec.InitConfiguration == nil { - dst.Spec.Template.Spec.InitConfiguration = &v1beta1.InitConfiguration{} + dst.Spec.Template.Spec.InitConfiguration = &bootstrapv1.InitConfiguration{} } dst.Spec.Template.Spec.InitConfiguration.NodeRegistration.IgnorePreflightErrors = restored.Spec.Template.Spec.InitConfiguration.NodeRegistration.IgnorePreflightErrors } @@ -117,7 +117,7 @@ func (src *KubeadmConfigTemplate) ConvertTo(dstRaw conversion.Hub) error { } func (dst *KubeadmConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfigTemplate) + src := srcRaw.(*bootstrapv1.KubeadmConfigTemplate) if err := Convert_v1beta1_KubeadmConfigTemplate_To_v1alpha3_KubeadmConfigTemplate(src, dst, nil); err != nil { return err @@ -132,23 +132,23 @@ func (dst 
*KubeadmConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error { } func (src *KubeadmConfigTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfigTemplateList) + dst := dstRaw.(*bootstrapv1.KubeadmConfigTemplateList) return Convert_v1alpha3_KubeadmConfigTemplateList_To_v1beta1_KubeadmConfigTemplateList(src, dst, nil) } func (dst *KubeadmConfigTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfigTemplateList) + src := srcRaw.(*bootstrapv1.KubeadmConfigTemplateList) return Convert_v1beta1_KubeadmConfigTemplateList_To_v1alpha3_KubeadmConfigTemplateList(src, dst, nil) } -func Convert_v1alpha3_KubeadmConfigStatus_To_v1beta1_KubeadmConfigStatus(in *KubeadmConfigStatus, out *v1beta1.KubeadmConfigStatus, s apiconversion.Scope) error { +func Convert_v1alpha3_KubeadmConfigStatus_To_v1beta1_KubeadmConfigStatus(in *KubeadmConfigStatus, out *bootstrapv1.KubeadmConfigStatus, s apiconversion.Scope) error { // KubeadmConfigStatus.BootstrapData has been removed in v1alpha4 because its content has been moved to the bootstrap data secret, value will be lost during conversion. return autoConvert_v1alpha3_KubeadmConfigStatus_To_v1beta1_KubeadmConfigStatus(in, out, s) } -func Convert_v1beta1_ClusterConfiguration_To_upstreamv1beta1_ClusterConfiguration(in *v1beta1.ClusterConfiguration, out *upstreamv1beta1.ClusterConfiguration, s apiconversion.Scope) error { +func Convert_v1beta1_ClusterConfiguration_To_upstreamv1beta1_ClusterConfiguration(in *bootstrapv1.ClusterConfiguration, out *upstreamv1beta1.ClusterConfiguration, s apiconversion.Scope) error { // DNS.Type was removed in v1alpha4 because only CoreDNS is supported; the information will be left to empty (kubeadm defaults it to CoredDNS); // Existing clusters using kube-dns or other DNS solutions will continue to be managed/supported via the skip-coredns annotation. 
@@ -156,34 +156,34 @@ func Convert_v1beta1_ClusterConfiguration_To_upstreamv1beta1_ClusterConfiguratio return upstreamv1beta1.Convert_v1beta1_ClusterConfiguration_To_upstreamv1beta1_ClusterConfiguration(in, out, s) } -func Convert_upstreamv1beta1_ClusterConfiguration_To_v1beta1_ClusterConfiguration(in *upstreamv1beta1.ClusterConfiguration, out *v1beta1.ClusterConfiguration, s apiconversion.Scope) error { +func Convert_upstreamv1beta1_ClusterConfiguration_To_v1beta1_ClusterConfiguration(in *upstreamv1beta1.ClusterConfiguration, out *bootstrapv1.ClusterConfiguration, s apiconversion.Scope) error { // DNS.Type was removed in v1alpha4 because only CoreDNS is supported; the information will be left to empty (kubeadm defaults it to CoredDNS); // ClusterConfiguration.UseHyperKubeImage was removed in kubeadm v1alpha4 API return upstreamv1beta1.Convert_upstreamv1beta1_ClusterConfiguration_To_v1beta1_ClusterConfiguration(in, out, s) } -func Convert_upstreamv1beta1_InitConfiguration_To_v1beta1_InitConfiguration(in *upstreamv1beta1.InitConfiguration, out *v1beta1.InitConfiguration, s apiconversion.Scope) error { +func Convert_upstreamv1beta1_InitConfiguration_To_v1beta1_InitConfiguration(in *upstreamv1beta1.InitConfiguration, out *bootstrapv1.InitConfiguration, s apiconversion.Scope) error { // NodeRegistrationOptions.IgnorePreflightErrors does not exist in kubeadm v1beta1 API return upstreamv1beta1.Convert_upstreamv1beta1_InitConfiguration_To_v1beta1_InitConfiguration(in, out, s) } -func Convert_v1beta1_InitConfiguration_To_upstreamv1beta1_InitConfiguration(in *v1beta1.InitConfiguration, out *upstreamv1beta1.InitConfiguration, s apiconversion.Scope) error { +func Convert_v1beta1_InitConfiguration_To_upstreamv1beta1_InitConfiguration(in *bootstrapv1.InitConfiguration, out *upstreamv1beta1.InitConfiguration, s apiconversion.Scope) error { // NodeRegistrationOptions.IgnorePreflightErrors does not exist in kubeadm v1beta1 API return 
upstreamv1beta1.Convert_v1beta1_InitConfiguration_To_upstreamv1beta1_InitConfiguration(in, out, s) } -func Convert_upstreamv1beta1_JoinConfiguration_To_v1beta1_JoinConfiguration(in *upstreamv1beta1.JoinConfiguration, out *v1beta1.JoinConfiguration, s apiconversion.Scope) error { +func Convert_upstreamv1beta1_JoinConfiguration_To_v1beta1_JoinConfiguration(in *upstreamv1beta1.JoinConfiguration, out *bootstrapv1.JoinConfiguration, s apiconversion.Scope) error { // NodeRegistrationOptions.IgnorePreflightErrors does not exist in kubeadm v1beta1 API return upstreamv1beta1.Convert_upstreamv1beta1_JoinConfiguration_To_v1beta1_JoinConfiguration(in, out, s) } -func Convert_v1beta1_JoinConfiguration_To_upstreamv1beta1_JoinConfiguration(in *v1beta1.JoinConfiguration, out *upstreamv1beta1.JoinConfiguration, s apiconversion.Scope) error { +func Convert_v1beta1_JoinConfiguration_To_upstreamv1beta1_JoinConfiguration(in *bootstrapv1.JoinConfiguration, out *upstreamv1beta1.JoinConfiguration, s apiconversion.Scope) error { // NodeRegistrationOptions.IgnorePreflightErrors does not exist in kubeadm v1beta1 API return upstreamv1beta1.Convert_v1beta1_JoinConfiguration_To_upstreamv1beta1_JoinConfiguration(in, out, s) } // Convert_v1beta1_KubeadmConfigSpec_To_v1alpha3_KubeadmConfigSpec is an autogenerated conversion function. -func Convert_v1beta1_KubeadmConfigSpec_To_v1alpha3_KubeadmConfigSpec(in *v1beta1.KubeadmConfigSpec, out *KubeadmConfigSpec, s apiconversion.Scope) error { +func Convert_v1beta1_KubeadmConfigSpec_To_v1alpha3_KubeadmConfigSpec(in *bootstrapv1.KubeadmConfigSpec, out *KubeadmConfigSpec, s apiconversion.Scope) error { // KubeadmConfigSpec.Ignition does not exist in kubeadm v1alpha3 API. 
return autoConvert_v1beta1_KubeadmConfigSpec_To_v1alpha3_KubeadmConfigSpec(in, out, s) } diff --git a/bootstrap/kubeadm/api/v1alpha3/conversion_test.go b/bootstrap/kubeadm/api/v1alpha3/conversion_test.go index f405d1bf787c..67a5e7e4d8e1 100644 --- a/bootstrap/kubeadm/api/v1alpha3/conversion_test.go +++ b/bootstrap/kubeadm/api/v1alpha3/conversion_test.go @@ -23,19 +23,19 @@ import ( "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for KubeadmConfig", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.KubeadmConfig{}, + Hub: &bootstrapv1.KubeadmConfig{}, Spoke: &KubeadmConfig{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for KubeadmConfigTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.KubeadmConfigTemplate{}, + Hub: &bootstrapv1.KubeadmConfigTemplate{}, Spoke: &KubeadmConfigTemplate{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) @@ -87,7 +87,7 @@ func kubeadmBootstrapTokenStringFuzzerV1UpstreamBeta1(in *upstreamv1beta1.Bootst in.Secret = "abcdef0123456789" } -func kubeadmBootstrapTokenStringFuzzerV1Beta1(in *v1beta1.BootstrapTokenString, c fuzz.Continue) { +func kubeadmBootstrapTokenStringFuzzerV1Beta1(in *bootstrapv1.BootstrapTokenString, c fuzz.Continue) { in.ID = "abcdef" in.Secret = "abcdef0123456789" } diff --git a/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go b/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go index a37539193371..ea29c29455e2 100644 --- a/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go +++ b/bootstrap/kubeadm/api/v1alpha3/kubeadmconfig_types.go @@ -19,7 +19,7 @@ package v1alpha3 
import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" ) @@ -128,7 +128,7 @@ type KubeadmConfigStatus struct { // Conditions defines current service state of the KubeadmConfig. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -145,12 +145,12 @@ type KubeadmConfig struct { } // GetConditions returns the set of conditions for this object. -func (c *KubeadmConfig) GetConditions() clusterv1.Conditions { +func (c *KubeadmConfig) GetConditions() clusterv1alpha3.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. -func (c *KubeadmConfig) SetConditions(conditions clusterv1.Conditions) { +func (c *KubeadmConfig) SetConditions(conditions clusterv1alpha3.Conditions) { c.Status.Conditions = conditions } diff --git a/bootstrap/kubeadm/api/v1alpha4/condition_consts.go b/bootstrap/kubeadm/api/v1alpha4/condition_consts.go index 4a997b4b51b8..0b288997f7b4 100644 --- a/bootstrap/kubeadm/api/v1alpha4/condition_consts.go +++ b/bootstrap/kubeadm/api/v1alpha4/condition_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1alpha4 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" +import clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" // Conditions and condition Reasons for the KubeadmConfig object. @@ -26,7 +26,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. 
- DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition clusterv1alpha4.ConditionType = "DataSecretAvailable" // WaitingForClusterInfrastructureReason (Severity=Info) document a bootstrap secret generation process // waiting for the cluster infrastructure to be ready. @@ -48,7 +48,7 @@ const ( // machine, if the cluster is not using a control plane ref object, if the certificates are not provided // by the users. // IMPORTANT: This condition won't be re-created after clusterctl move. - CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1alpha4.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmConfig controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller diff --git a/bootstrap/kubeadm/api/v1alpha4/conversion.go b/bootstrap/kubeadm/api/v1alpha4/conversion.go index b348fb89f521..9608da608700 100644 --- a/bootstrap/kubeadm/api/v1alpha4/conversion.go +++ b/bootstrap/kubeadm/api/v1alpha4/conversion.go @@ -20,19 +20,19 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *KubeadmConfig) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfig) + dst := dstRaw.(*bootstrapv1.KubeadmConfig) if err := Convert_v1alpha4_KubeadmConfig_To_v1beta1_KubeadmConfig(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.KubeadmConfig{} + restored := &bootstrapv1.KubeadmConfig{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -43,7 +43,7 @@ func (src *KubeadmConfig) ConvertTo(dstRaw conversion.Hub) error { } func (dst *KubeadmConfig) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfig) + src := srcRaw.(*bootstrapv1.KubeadmConfig) if err := Convert_v1beta1_KubeadmConfig_To_v1alpha4_KubeadmConfig(src, dst, nil); err != nil { return err @@ -53,26 +53,26 @@ func (dst *KubeadmConfig) ConvertFrom(srcRaw conversion.Hub) error { } func (src *KubeadmConfigList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfigList) + dst := dstRaw.(*bootstrapv1.KubeadmConfigList) return Convert_v1alpha4_KubeadmConfigList_To_v1beta1_KubeadmConfigList(src, dst, nil) } func (dst *KubeadmConfigList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfigList) + src := srcRaw.(*bootstrapv1.KubeadmConfigList) return Convert_v1beta1_KubeadmConfigList_To_v1alpha4_KubeadmConfigList(src, dst, nil) } func (src *KubeadmConfigTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfigTemplate) + dst := dstRaw.(*bootstrapv1.KubeadmConfigTemplate) if err := Convert_v1alpha4_KubeadmConfigTemplate_To_v1beta1_KubeadmConfigTemplate(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.KubeadmConfigTemplate{} + restored := &bootstrapv1.KubeadmConfigTemplate{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -83,7 +83,7 @@ func (src *KubeadmConfigTemplate) ConvertTo(dstRaw conversion.Hub) error { } func (dst *KubeadmConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfigTemplate) + src := srcRaw.(*bootstrapv1.KubeadmConfigTemplate) if err := Convert_v1beta1_KubeadmConfigTemplate_To_v1alpha4_KubeadmConfigTemplate(src, dst, nil); err != nil { return err @@ -93,19 +93,19 @@ func (dst *KubeadmConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error { } func (src *KubeadmConfigTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmConfigTemplateList) + dst := dstRaw.(*bootstrapv1.KubeadmConfigTemplateList) return Convert_v1alpha4_KubeadmConfigTemplateList_To_v1beta1_KubeadmConfigTemplateList(src, dst, nil) } func (dst *KubeadmConfigTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmConfigTemplateList) + src := srcRaw.(*bootstrapv1.KubeadmConfigTemplateList) return Convert_v1beta1_KubeadmConfigTemplateList_To_v1alpha4_KubeadmConfigTemplateList(src, dst, nil) } // Convert_v1beta1_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec is an autogenerated conversion function. -func Convert_v1beta1_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(in *v1beta1.KubeadmConfigSpec, out *KubeadmConfigSpec, s apiconversion.Scope) error { +func Convert_v1beta1_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(in *bootstrapv1.KubeadmConfigSpec, out *KubeadmConfigSpec, s apiconversion.Scope) error { // KubeadmConfigSpec.Ignition does not exist in kubeadm v1alpha4 API. 
return autoConvert_v1beta1_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(in, out, s) } diff --git a/bootstrap/kubeadm/api/v1alpha4/conversion_test.go b/bootstrap/kubeadm/api/v1alpha4/conversion_test.go index b05f8037b958..3e37138abb2b 100644 --- a/bootstrap/kubeadm/api/v1alpha4/conversion_test.go +++ b/bootstrap/kubeadm/api/v1alpha4/conversion_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) @@ -35,12 +35,12 @@ const ( func TestFuzzyConversion(t *testing.T) { t.Run("for KubeadmConfig", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.KubeadmConfig{}, + Hub: &bootstrapv1.KubeadmConfig{}, Spoke: &KubeadmConfig{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for KubeadmConfigTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.KubeadmConfigTemplate{}, + Hub: &bootstrapv1.KubeadmConfigTemplate{}, Spoke: &KubeadmConfigTemplate{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) @@ -85,7 +85,7 @@ func kubeadmBootstrapTokenStringFuzzerV1UpstreamBeta1(in *upstreamv1beta1.Bootst in.Secret = fakeSecret } -func kubeadmBootstrapTokenStringFuzzerV1Beta1(in *v1beta1.BootstrapTokenString, c fuzz.Continue) { +func kubeadmBootstrapTokenStringFuzzerV1Beta1(in *bootstrapv1.BootstrapTokenString, c fuzz.Continue) { in.ID = fakeID in.Secret = fakeSecret } diff --git a/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go b/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go index 082f0f52191c..3d0fc7d9e77d 100644 --- a/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go +++ b/bootstrap/kubeadm/api/v1alpha4/kubeadmconfig_types.go @@ -19,7 +19,7 @@ package 
v1alpha4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" ) // Format specifies the output format of the bootstrap data @@ -120,7 +120,7 @@ type KubeadmConfigStatus struct { // Conditions defines current service state of the KubeadmConfig. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -138,12 +138,12 @@ type KubeadmConfig struct { } // GetConditions returns the set of conditions for this object. -func (c *KubeadmConfig) GetConditions() clusterv1.Conditions { +func (c *KubeadmConfig) GetConditions() clusterv1alpha4.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. -func (c *KubeadmConfig) SetConditions(conditions clusterv1.Conditions) { +func (c *KubeadmConfig) SetConditions(conditions clusterv1alpha4.Conditions) { c.Status.Conditions = conditions } diff --git a/bootstrap/kubeadm/main.go b/bootstrap/kubeadm/main.go index 94827edb6d2a..f6f81139ed9b 100644 --- a/bootstrap/kubeadm/main.go +++ b/bootstrap/kubeadm/main.go @@ -40,9 +40,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmbootstrapv1alpha3 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" - kubeadmbootstrapv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - kubeadmbootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1alpha3 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" + bootstrapv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/controllers" "sigs.k8s.io/cluster-api/controllers/remote" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ 
-61,9 +61,9 @@ func init() { _ = clientgoscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) _ = expv1.AddToScheme(scheme) - _ = kubeadmbootstrapv1alpha3.AddToScheme(scheme) - _ = kubeadmbootstrapv1alpha4.AddToScheme(scheme) - _ = kubeadmbootstrapv1.AddToScheme(scheme) + _ = bootstrapv1alpha3.AddToScheme(scheme) + _ = bootstrapv1alpha4.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } @@ -213,11 +213,11 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { } func setupWebhooks(mgr ctrl.Manager) { - if err := (&kubeadmbootstrapv1.KubeadmConfig{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&bootstrapv1.KubeadmConfig{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmConfig") os.Exit(1) } - if err := (&kubeadmbootstrapv1.KubeadmConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&bootstrapv1.KubeadmConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmConfigTemplate") os.Exit(1) } diff --git a/bootstrap/kubeadm/types/upstreamv1beta1/conversion_test.go b/bootstrap/kubeadm/types/upstreamv1beta1/conversion_test.go index 1c1e7aaecf46..ebe8ad97fc05 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta1/conversion_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta1/conversion_test.go @@ -23,34 +23,34 @@ import ( "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for ClusterConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.ClusterConfiguration{}, + Hub: &bootstrapv1.ClusterConfiguration{}, Spoke: 
&ClusterConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for ClusterStatus", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.ClusterStatus{}, + Hub: &bootstrapv1.ClusterStatus{}, Spoke: &ClusterStatus{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for InitConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.InitConfiguration{}, + Hub: &bootstrapv1.InitConfiguration{}, Spoke: &InitConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for JoinConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.JoinConfiguration{}, + Hub: &bootstrapv1.JoinConfiguration{}, Spoke: &JoinConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. 
SkipSpokeAnnotationCleanup: true, @@ -80,7 +80,7 @@ func clusterConfigurationFuzzer(obj *ClusterConfiguration, c fuzz.Continue) { obj.UseHyperKubeImage = false } -func kubeadmNodeRegistrationOptionsFuzzer(obj *v1beta1.NodeRegistrationOptions, c fuzz.Continue) { +func kubeadmNodeRegistrationOptionsFuzzer(obj *bootstrapv1.NodeRegistrationOptions, c fuzz.Continue) { c.FuzzNoCustom(obj) // NodeRegistrationOptions.IgnorePreflightErrors does not exist in kubeadm v1beta1 API, so setting it to nil in order to avoid diff --git a/bootstrap/kubeadm/types/upstreamv1beta2/conversion_test.go b/bootstrap/kubeadm/types/upstreamv1beta2/conversion_test.go index 715d1901c138..197a30bda753 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta2/conversion_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta2/conversion_test.go @@ -23,34 +23,34 @@ import ( "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for ClusterConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.ClusterConfiguration{}, + Hub: &bootstrapv1.ClusterConfiguration{}, Spoke: &ClusterConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for ClusterStatus", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.ClusterStatus{}, + Hub: &bootstrapv1.ClusterStatus{}, Spoke: &ClusterStatus{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. 
SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for InitConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.InitConfiguration{}, + Hub: &bootstrapv1.InitConfiguration{}, Spoke: &InitConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for JoinConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.JoinConfiguration{}, + Hub: &bootstrapv1.JoinConfiguration{}, Spoke: &JoinConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go b/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go index 5efbe8983e6a..92afffae5a24 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) @@ -33,11 +33,11 @@ func TestFuzzyConversion(t *testing.T) { g := NewWithT(t) scheme := runtime.NewScheme() g.Expect(AddToScheme(scheme)).To(Succeed()) - g.Expect(v1beta1.AddToScheme(scheme)).To(Succeed()) + g.Expect(bootstrapv1.AddToScheme(scheme)).To(Succeed()) t.Run("for ClusterConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ Scheme: scheme, - Hub: &v1beta1.ClusterConfiguration{}, + Hub: &bootstrapv1.ClusterConfiguration{}, Spoke: &ClusterConfiguration{}, // NOTE: 
Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, @@ -45,7 +45,7 @@ func TestFuzzyConversion(t *testing.T) { })) t.Run("for InitConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ Scheme: scheme, - Hub: &v1beta1.InitConfiguration{}, + Hub: &bootstrapv1.InitConfiguration{}, Spoke: &InitConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, @@ -53,7 +53,7 @@ func TestFuzzyConversion(t *testing.T) { })) t.Run("for JoinConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ Scheme: scheme, - Hub: &v1beta1.JoinConfiguration{}, + Hub: &bootstrapv1.JoinConfiguration{}, Spoke: &JoinConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, diff --git a/cmd/clusterctl/client/cluster/upgrader_test.go b/cmd/clusterctl/client/cluster/upgrader_test.go index f8c357489a08..3e0c962a9605 100644 --- a/cmd/clusterctl/client/cluster/upgrader_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_test.go @@ -22,7 +22,7 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" - clusterv1v1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/repository" @@ -149,7 +149,7 @@ func Test_providerUpgrader_Plan(t *testing.T) { WithVersions("v1.0.0", "v1.0.1", "v2.0.0", "v3.0.0"). 
WithMetadata("v3.0.0", &clusterctlv1.Metadata{ ReleaseSeries: []clusterctlv1.ReleaseSeries{ - {Major: 1, Minor: 0, Contract: clusterv1v1alpha3.GroupVersion.Version}, + {Major: 1, Minor: 0, Contract: clusterv1alpha3.GroupVersion.Version}, {Major: 2, Minor: 0, Contract: test.PreviousCAPIContractNotSupported}, {Major: 3, Minor: 0, Contract: test.CurrentCAPIContract}, }, @@ -158,7 +158,7 @@ func Test_providerUpgrader_Plan(t *testing.T) { WithVersions("v1.0.0", "v2.0.0", "v2.0.1", "v3.0.0"). WithMetadata("v3.0.0", &clusterctlv1.Metadata{ ReleaseSeries: []clusterctlv1.ReleaseSeries{ - {Major: 1, Minor: 0, Contract: clusterv1v1alpha3.GroupVersion.Version}, + {Major: 1, Minor: 0, Contract: clusterv1alpha3.GroupVersion.Version}, {Major: 2, Minor: 0, Contract: test.PreviousCAPIContractNotSupported}, {Major: 3, Minor: 0, Contract: test.CurrentCAPIContract}, }, @@ -171,7 +171,7 @@ func Test_providerUpgrader_Plan(t *testing.T) { }, want: []UpgradePlan{ { // one upgrade plan with the latest releases in the v1alpha3 contract (not supported, but upgrade plan should report these options) - Contract: clusterv1v1alpha3.GroupVersion.Version, + Contract: clusterv1alpha3.GroupVersion.Version, Providers: []UpgradeItem{ { Provider: fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"), diff --git a/cmd/clusterctl/client/upgrade.go b/cmd/clusterctl/client/upgrade.go index 267af9538717..2812d3027b85 100644 --- a/cmd/clusterctl/client/upgrade.go +++ b/cmd/clusterctl/client/upgrade.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" - clusterv1old "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -59,7 +59,7 @@ func (c *clusterctlClient) PlanUpgrade(options 
PlanUpgradeOptions) ([]UpgradePla // this is an exception and support for skipping releases should be removed in future releases. if err := clusterClient.ProviderInventory().CheckCAPIContract( cluster.AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, - cluster.AllowCAPIContract{Contract: clusterv1old.GroupVersion.Version}, + cluster.AllowCAPIContract{Contract: clusterv1alpha4.GroupVersion.Version}, ); err != nil { return nil, err } @@ -125,7 +125,7 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { // this is an exception and support for skipping releases should be removed in future releases. if err := clusterClient.ProviderInventory().CheckCAPIContract( cluster.AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, - cluster.AllowCAPIContract{Contract: clusterv1old.GroupVersion.Version}, + cluster.AllowCAPIContract{Contract: clusterv1alpha4.GroupVersion.Version}, ); err != nil { return err } diff --git a/cmd/clusterctl/internal/test/contracts.go b/cmd/clusterctl/internal/test/contracts.go index df43e010a64c..6b032b2c566e 100644 --- a/cmd/clusterctl/internal/test/contracts.go +++ b/cmd/clusterctl/internal/test/contracts.go @@ -17,12 +17,12 @@ limitations under the License. package test import ( - clusterv1old "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // PreviousCAPIContractNotSupported define the previous Cluster API contract, not supported by this release of clusterctl. -var PreviousCAPIContractNotSupported = clusterv1old.GroupVersion.Version +var PreviousCAPIContractNotSupported = clusterv1alpha4.GroupVersion.Version // CurrentCAPIContract define the current Cluster API contract. 
var CurrentCAPIContract = clusterv1.GroupVersion.Version diff --git a/controlplane/kubeadm/api/v1alpha3/condition_consts.go b/controlplane/kubeadm/api/v1alpha3/condition_consts.go index da72a64724c4..b7d8e3243548 100644 --- a/controlplane/kubeadm/api/v1alpha3/condition_consts.go +++ b/controlplane/kubeadm/api/v1alpha3/condition_consts.go @@ -16,19 +16,19 @@ limitations under the License. package v1alpha3 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" // Conditions and condition Reasons for the KubeadmControlPlane object. const ( // MachinesReadyCondition reports an aggregate of current status of the machines controlled by the KubeadmControlPlane. - MachinesReadyCondition clusterv1.ConditionType = "MachinesReady" + MachinesReadyCondition clusterv1alpha3.ConditionType = "MachinesReady" ) const ( // CertificatesAvailableCondition documents that cluster certificates were generated as part of the // processing of a a KubeadmControlPlane object. - CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1alpha3.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmControlPlane controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller @@ -39,7 +39,7 @@ const ( const ( // AvailableCondition documents that the first control plane instance has completed the kubeadm init operation // and so the control plane is available and an API server instance is ready for processing requests. - AvailableCondition clusterv1.ConditionType = "Available" + AvailableCondition clusterv1alpha3.ConditionType = "Available" // WaitingForKubeadmInitReason (Severity=Info) documents a KubeadmControlPlane object waiting for the first // control plane instance to complete the kubeadm init operation. 
@@ -49,7 +49,7 @@ const ( const ( // MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the KubeadmControlPlane // is up to date. Whe this condition is false, the KubeadmControlPlane is executing a rolling upgrade. - MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate" + MachinesSpecUpToDateCondition clusterv1alpha3.ConditionType = "MachinesSpecUpToDate" // RollingUpdateInProgressReason (Severity=Warning) documents a KubeadmControlPlane object executing a // rolling upgrade for aligning the machines spec to the desired state. @@ -58,7 +58,7 @@ const ( const ( // ResizedCondition documents a KubeadmControlPlane that is resizing the set of controlled machines. - ResizedCondition clusterv1.ConditionType = "Resized" + ResizedCondition clusterv1alpha3.ConditionType = "Resized" // ScalingUpReason (Severity=Info) documents a KubeadmControlPlane that is increasing the number of replicas. ScalingUpReason = "ScalingUp" @@ -71,7 +71,7 @@ const ( // ControlPlaneComponentsHealthyCondition reports the overall status of control plane components // implemented as static pods generated by kubeadm including kube-api-server, kube-controller manager, // kube-scheduler and etcd if managed. - ControlPlaneComponentsHealthyCondition clusterv1.ConditionType = "ControlPlaneComponentsHealthy" + ControlPlaneComponentsHealthyCondition clusterv1alpha3.ConditionType = "ControlPlaneComponentsHealthy" // ControlPlaneComponentsUnhealthyReason (Severity=Error) documents a control plane component not healthy. ControlPlaneComponentsUnhealthyReason = "ControlPlaneComponentsUnhealthy" @@ -83,17 +83,17 @@ const ( ControlPlaneComponentsInspectionFailedReason = "ControlPlaneComponentsInspectionFailed" // MachineAPIServerPodHealthyCondition reports a machine's kube-apiserver's operational status. 
- MachineAPIServerPodHealthyCondition clusterv1.ConditionType = "APIServerPodHealthy" + MachineAPIServerPodHealthyCondition clusterv1alpha3.ConditionType = "APIServerPodHealthy" // MachineControllerManagerPodHealthyCondition reports a machine's kube-controller-manager's health status. - MachineControllerManagerPodHealthyCondition clusterv1.ConditionType = "ControllerManagerPodHealthy" + MachineControllerManagerPodHealthyCondition clusterv1alpha3.ConditionType = "ControllerManagerPodHealthy" // MachineSchedulerPodHealthyCondition reports a machine's kube-scheduler's operational status. - MachineSchedulerPodHealthyCondition clusterv1.ConditionType = "SchedulerPodHealthy" + MachineSchedulerPodHealthyCondition clusterv1alpha3.ConditionType = "SchedulerPodHealthy" // MachineEtcdPodHealthyCondition reports a machine's etcd pod's operational status. // NOTE: This conditions exists only if a stacked etcd cluster is used. - MachineEtcdPodHealthyCondition clusterv1.ConditionType = "EtcdPodHealthy" + MachineEtcdPodHealthyCondition clusterv1alpha3.ConditionType = "EtcdPodHealthy" // PodProvisioningReason (Severity=Info) documents a pod waiting to be provisioned i.e., Pod is in "Pending" phase. PodProvisioningReason = "PodProvisioning" @@ -111,7 +111,7 @@ const ( const ( // EtcdClusterHealthyCondition documents the overall etcd cluster's health. - EtcdClusterHealthyCondition clusterv1.ConditionType = "EtcdClusterHealthyCondition" + EtcdClusterHealthyCondition clusterv1alpha3.ConditionType = "EtcdClusterHealthyCondition" // EtcdClusterInspectionFailedReason documents a failure in inspecting the etcd cluster status. EtcdClusterInspectionFailedReason = "EtcdClusterInspectionFailed" @@ -124,7 +124,7 @@ const ( // MachineEtcdMemberHealthyCondition report the machine's etcd member's health status. // NOTE: This conditions exists only if a stacked etcd cluster is used. 
- MachineEtcdMemberHealthyCondition clusterv1.ConditionType = "EtcdMemberHealthy" + MachineEtcdMemberHealthyCondition clusterv1alpha3.ConditionType = "EtcdMemberHealthy" // EtcdMemberInspectionFailedReason documents a failure in inspecting the etcd member status. EtcdMemberInspectionFailedReason = "MemberInspectionFailed" diff --git a/controlplane/kubeadm/api/v1alpha3/conversion.go b/controlplane/kubeadm/api/v1alpha3/conversion.go index 3fe3138fdda1..648aaf65fb83 100644 --- a/controlplane/kubeadm/api/v1alpha3/conversion.go +++ b/controlplane/kubeadm/api/v1alpha3/conversion.go @@ -20,20 +20,20 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - kubeadmbootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmControlPlane) + dst := dstRaw.(*controlplanev1.KubeadmControlPlane) if err := Convert_v1alpha3_KubeadmControlPlane_To_v1beta1_KubeadmControlPlane(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.KubeadmControlPlane{} + restored := &controlplanev1.KubeadmControlPlane{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -43,14 +43,14 @@ func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error { if restored.Spec.KubeadmConfigSpec.JoinConfiguration != nil && restored.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors != nil { if dst.Spec.KubeadmConfigSpec.JoinConfiguration == nil { - dst.Spec.KubeadmConfigSpec.JoinConfiguration = &kubeadmbootstrapv1.JoinConfiguration{} + dst.Spec.KubeadmConfigSpec.JoinConfiguration = &bootstrapv1.JoinConfiguration{} } dst.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = restored.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors } if restored.Spec.KubeadmConfigSpec.InitConfiguration != nil && restored.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.IgnorePreflightErrors != nil { if dst.Spec.KubeadmConfigSpec.InitConfiguration == nil { - dst.Spec.KubeadmConfigSpec.InitConfiguration = &kubeadmbootstrapv1.InitConfiguration{} + dst.Spec.KubeadmConfigSpec.InitConfiguration = &bootstrapv1.InitConfiguration{} } dst.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.IgnorePreflightErrors = restored.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.IgnorePreflightErrors } @@ -61,7 +61,7 @@ func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error { } func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmControlPlane) + src := srcRaw.(*controlplanev1.KubeadmControlPlane) if err := Convert_v1beta1_KubeadmControlPlane_To_v1alpha3_KubeadmControlPlane(src, dst, nil); err != nil { return err @@ -76,30 +76,30 @@ func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error { } func (src *KubeadmControlPlaneList) ConvertTo(dstRaw conversion.Hub) error { - dst := 
dstRaw.(*v1beta1.KubeadmControlPlaneList) + dst := dstRaw.(*controlplanev1.KubeadmControlPlaneList) return Convert_v1alpha3_KubeadmControlPlaneList_To_v1beta1_KubeadmControlPlaneList(src, dst, nil) } func (dst *KubeadmControlPlaneList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmControlPlaneList) + src := srcRaw.(*controlplanev1.KubeadmControlPlaneList) return Convert_v1beta1_KubeadmControlPlaneList_To_v1alpha3_KubeadmControlPlaneList(src, dst, nil) } -func Convert_v1beta1_KubeadmControlPlaneSpec_To_v1alpha3_KubeadmControlPlaneSpec(in *v1beta1.KubeadmControlPlaneSpec, out *KubeadmControlPlaneSpec, s apiconversion.Scope) error { +func Convert_v1beta1_KubeadmControlPlaneSpec_To_v1alpha3_KubeadmControlPlaneSpec(in *controlplanev1.KubeadmControlPlaneSpec, out *KubeadmControlPlaneSpec, s apiconversion.Scope) error { out.UpgradeAfter = in.RolloutAfter out.InfrastructureTemplate = in.MachineTemplate.InfrastructureRef out.NodeDrainTimeout = in.MachineTemplate.NodeDrainTimeout return autoConvert_v1beta1_KubeadmControlPlaneSpec_To_v1alpha3_KubeadmControlPlaneSpec(in, out, s) } -func Convert_v1beta1_KubeadmControlPlaneStatus_To_v1alpha3_KubeadmControlPlaneStatus(in *v1beta1.KubeadmControlPlaneStatus, out *KubeadmControlPlaneStatus, s apiconversion.Scope) error { +func Convert_v1beta1_KubeadmControlPlaneStatus_To_v1alpha3_KubeadmControlPlaneStatus(in *controlplanev1.KubeadmControlPlaneStatus, out *KubeadmControlPlaneStatus, s apiconversion.Scope) error { // NOTE: custom conversion func is required because status.Version does not exist in v1alpha3. 
return autoConvert_v1beta1_KubeadmControlPlaneStatus_To_v1alpha3_KubeadmControlPlaneStatus(in, out, s) } -func Convert_v1alpha3_KubeadmControlPlaneSpec_To_v1beta1_KubeadmControlPlaneSpec(in *KubeadmControlPlaneSpec, out *v1beta1.KubeadmControlPlaneSpec, s apiconversion.Scope) error { +func Convert_v1alpha3_KubeadmControlPlaneSpec_To_v1beta1_KubeadmControlPlaneSpec(in *KubeadmControlPlaneSpec, out *controlplanev1.KubeadmControlPlaneSpec, s apiconversion.Scope) error { out.RolloutAfter = in.UpgradeAfter out.MachineTemplate.InfrastructureRef = in.InfrastructureTemplate out.MachineTemplate.NodeDrainTimeout = in.NodeDrainTimeout diff --git a/controlplane/kubeadm/api/v1alpha3/conversion_test.go b/controlplane/kubeadm/api/v1alpha3/conversion_test.go index 2d23a5d9bd10..0a2dc0a795dd 100644 --- a/controlplane/kubeadm/api/v1alpha3/conversion_test.go +++ b/controlplane/kubeadm/api/v1alpha3/conversion_test.go @@ -23,15 +23,15 @@ import ( "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for KubeadmControlPlane", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.KubeadmControlPlane{}, + Hub: &controlplanev1.KubeadmControlPlane{}, Spoke: &KubeadmControlPlane{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) @@ -60,7 +60,7 @@ func kubeadmBootstrapTokenStringFuzzer(in *upstreamv1beta1.BootstrapTokenString, in.ID = "abcdef" in.Secret = "abcdef0123456789" } -func cabpkBootstrapTokenStringFuzzer(in *cabpkv1.BootstrapTokenString, c fuzz.Continue) { +func 
cabpkBootstrapTokenStringFuzzer(in *bootstrapv1.BootstrapTokenString, c fuzz.Continue) { in.ID = "abcdef" in.Secret = "abcdef0123456789" } diff --git a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go index 1dfc2f20035e..386c97ba160d 100644 --- a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" + bootstrapv1alpha3 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" "sigs.k8s.io/cluster-api/errors" ) @@ -74,7 +74,7 @@ type KubeadmControlPlaneSpec struct { // KubeadmConfigSpec is a KubeadmConfigSpec // to use for initializing and joining machines to the control plane. - KubeadmConfigSpec cabpkv1.KubeadmConfigSpec `json:"kubeadmConfigSpec"` + KubeadmConfigSpec bootstrapv1alpha3.KubeadmConfigSpec `json:"kubeadmConfigSpec"` // UpgradeAfter is a field to indicate an upgrade should be performed // after the specified time even if no changes have been made to the @@ -180,7 +180,7 @@ type KubeadmControlPlaneStatus struct { // Conditions defines current service state of the KubeadmControlPlane. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -205,12 +205,12 @@ type KubeadmControlPlane struct { } // GetConditions returns the set of conditions for this object. -func (in *KubeadmControlPlane) GetConditions() clusterv1.Conditions { +func (in *KubeadmControlPlane) GetConditions() clusterv1alpha3.Conditions { return in.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (in *KubeadmControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (in *KubeadmControlPlane) SetConditions(conditions clusterv1alpha3.Conditions) { in.Status.Conditions = conditions } diff --git a/controlplane/kubeadm/api/v1alpha3/webhook_test.go b/controlplane/kubeadm/api/v1alpha3/webhook_test.go index 9008b7f4dad8..e92b414b56fb 100644 --- a/controlplane/kubeadm/api/v1alpha3/webhook_test.go +++ b/controlplane/kubeadm/api/v1alpha3/webhook_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" + bootstrapv1alpha3 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" "sigs.k8s.io/cluster-api/util" ) @@ -52,7 +52,7 @@ func TestKubeadmControlPlaneConversion(t *testing.T) { Namespace: ns.Name, Name: infraMachineTemplateName, }, - KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{ + KubeadmConfigSpec: bootstrapv1alpha3.KubeadmConfigSpec{ ClusterConfiguration: &upstreamv1beta1.ClusterConfiguration{ APIServer: upstreamv1beta1.APIServer{ ControlPlaneComponent: upstreamv1beta1.ControlPlaneComponent{ diff --git a/controlplane/kubeadm/api/v1alpha4/condition_consts.go b/controlplane/kubeadm/api/v1alpha4/condition_consts.go index 25329e9edc32..db83335b0374 100644 --- a/controlplane/kubeadm/api/v1alpha4/condition_consts.go +++ b/controlplane/kubeadm/api/v1alpha4/condition_consts.go @@ -16,19 +16,19 @@ limitations under the License. package v1alpha4 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" +import clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" // Conditions and condition Reasons for the KubeadmControlPlane object. const ( // MachinesReadyCondition reports an aggregate of current status of the machines controlled by the KubeadmControlPlane. 
- MachinesReadyCondition clusterv1.ConditionType = "MachinesReady" + MachinesReadyCondition clusterv1alpha4.ConditionType = "MachinesReady" ) const ( // CertificatesAvailableCondition documents that cluster certificates were generated as part of the // processing of a a KubeadmControlPlane object. - CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1alpha4.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmControlPlane controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller @@ -39,7 +39,7 @@ const ( const ( // AvailableCondition documents that the first control plane instance has completed the kubeadm init operation // and so the control plane is available and an API server instance is ready for processing requests. - AvailableCondition clusterv1.ConditionType = "Available" + AvailableCondition clusterv1alpha4.ConditionType = "Available" // WaitingForKubeadmInitReason (Severity=Info) documents a KubeadmControlPlane object waiting for the first // control plane instance to complete the kubeadm init operation. @@ -49,7 +49,7 @@ const ( const ( // MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the KubeadmControlPlane // is up to date. Whe this condition is false, the KubeadmControlPlane is executing a rolling upgrade. - MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate" + MachinesSpecUpToDateCondition clusterv1alpha4.ConditionType = "MachinesSpecUpToDate" // RollingUpdateInProgressReason (Severity=Warning) documents a KubeadmControlPlane object executing a // rolling upgrade for aligning the machines spec to the desired state. @@ -58,7 +58,7 @@ const ( const ( // ResizedCondition documents a KubeadmControlPlane that is resizing the set of controlled machines. 
- ResizedCondition clusterv1.ConditionType = "Resized" + ResizedCondition clusterv1alpha4.ConditionType = "Resized" // ScalingUpReason (Severity=Info) documents a KubeadmControlPlane that is increasing the number of replicas. ScalingUpReason = "ScalingUp" @@ -71,7 +71,7 @@ const ( // ControlPlaneComponentsHealthyCondition reports the overall status of control plane components // implemented as static pods generated by kubeadm including kube-api-server, kube-controller manager, // kube-scheduler and etcd if managed. - ControlPlaneComponentsHealthyCondition clusterv1.ConditionType = "ControlPlaneComponentsHealthy" + ControlPlaneComponentsHealthyCondition clusterv1alpha4.ConditionType = "ControlPlaneComponentsHealthy" // ControlPlaneComponentsUnhealthyReason (Severity=Error) documents a control plane component not healthy. ControlPlaneComponentsUnhealthyReason = "ControlPlaneComponentsUnhealthy" @@ -83,17 +83,17 @@ const ( ControlPlaneComponentsInspectionFailedReason = "ControlPlaneComponentsInspectionFailed" // MachineAPIServerPodHealthyCondition reports a machine's kube-apiserver's operational status. - MachineAPIServerPodHealthyCondition clusterv1.ConditionType = "APIServerPodHealthy" + MachineAPIServerPodHealthyCondition clusterv1alpha4.ConditionType = "APIServerPodHealthy" // MachineControllerManagerPodHealthyCondition reports a machine's kube-controller-manager's health status. - MachineControllerManagerPodHealthyCondition clusterv1.ConditionType = "ControllerManagerPodHealthy" + MachineControllerManagerPodHealthyCondition clusterv1alpha4.ConditionType = "ControllerManagerPodHealthy" // MachineSchedulerPodHealthyCondition reports a machine's kube-scheduler's operational status. - MachineSchedulerPodHealthyCondition clusterv1.ConditionType = "SchedulerPodHealthy" + MachineSchedulerPodHealthyCondition clusterv1alpha4.ConditionType = "SchedulerPodHealthy" // MachineEtcdPodHealthyCondition reports a machine's etcd pod's operational status. 
// NOTE: This conditions exists only if a stacked etcd cluster is used. - MachineEtcdPodHealthyCondition clusterv1.ConditionType = "EtcdPodHealthy" + MachineEtcdPodHealthyCondition clusterv1alpha4.ConditionType = "EtcdPodHealthy" // PodProvisioningReason (Severity=Info) documents a pod waiting to be provisioned i.e., Pod is in "Pending" phase. PodProvisioningReason = "PodProvisioning" @@ -111,7 +111,7 @@ const ( const ( // EtcdClusterHealthyCondition documents the overall etcd cluster's health. - EtcdClusterHealthyCondition clusterv1.ConditionType = "EtcdClusterHealthyCondition" + EtcdClusterHealthyCondition clusterv1alpha4.ConditionType = "EtcdClusterHealthyCondition" // EtcdClusterInspectionFailedReason documents a failure in inspecting the etcd cluster status. EtcdClusterInspectionFailedReason = "EtcdClusterInspectionFailed" @@ -124,7 +124,7 @@ const ( // MachineEtcdMemberHealthyCondition report the machine's etcd member's health status. // NOTE: This conditions exists only if a stacked etcd cluster is used. - MachineEtcdMemberHealthyCondition clusterv1.ConditionType = "EtcdMemberHealthy" + MachineEtcdMemberHealthyCondition clusterv1alpha4.ConditionType = "EtcdMemberHealthy" // EtcdMemberInspectionFailedReason documents a failure in inspecting the etcd member status. EtcdMemberInspectionFailedReason = "MemberInspectionFailed" @@ -135,7 +135,7 @@ const ( // MachinesCreatedCondition documents that the machines controlled by the KubeadmControlPlane are created. // When this condition is false, it indicates that there was an error when cloning the infrastructure/bootstrap template or // when generating the machine object. - MachinesCreatedCondition clusterv1.ConditionType = "MachinesCreated" + MachinesCreatedCondition clusterv1alpha4.ConditionType = "MachinesCreated" // InfrastructureTemplateCloningFailedReason (Severity=Error) documents a KubeadmControlPlane failing to // clone the infrastructure template. 
diff --git a/controlplane/kubeadm/api/v1alpha4/conversion.go b/controlplane/kubeadm/api/v1alpha4/conversion.go index 4ecc9ff3eb69..1332b29e8b81 100644 --- a/controlplane/kubeadm/api/v1alpha4/conversion.go +++ b/controlplane/kubeadm/api/v1alpha4/conversion.go @@ -20,20 +20,20 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - kubeadmbootstrapv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + bootstrapv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmControlPlane) + dst := dstRaw.(*controlplanev1.KubeadmControlPlane) if err := Convert_v1alpha4_KubeadmControlPlane_To_v1beta1_KubeadmControlPlane(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.KubeadmControlPlane{} + restored := &controlplanev1.KubeadmControlPlane{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -44,7 +44,7 @@ func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error { } func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmControlPlane) + src := srcRaw.(*controlplanev1.KubeadmControlPlane) if err := Convert_v1beta1_KubeadmControlPlane_To_v1alpha4_KubeadmControlPlane(src, dst, nil); err != nil { return err @@ -55,26 +55,26 @@ func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error { } func (src *KubeadmControlPlaneList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmControlPlaneList) + dst := dstRaw.(*controlplanev1.KubeadmControlPlaneList) return Convert_v1alpha4_KubeadmControlPlaneList_To_v1beta1_KubeadmControlPlaneList(src, dst, nil) } func (dst *KubeadmControlPlaneList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmControlPlaneList) + src := srcRaw.(*controlplanev1.KubeadmControlPlaneList) return Convert_v1beta1_KubeadmControlPlaneList_To_v1alpha4_KubeadmControlPlaneList(src, dst, nil) } func (src *KubeadmControlPlaneTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmControlPlaneTemplate) + dst := dstRaw.(*controlplanev1.KubeadmControlPlaneTemplate) if err := Convert_v1alpha4_KubeadmControlPlaneTemplate_To_v1beta1_KubeadmControlPlaneTemplate(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.KubeadmControlPlaneTemplate{} + restored := &controlplanev1.KubeadmControlPlaneTemplate{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -86,7 +86,7 @@ func (src *KubeadmControlPlaneTemplate) ConvertTo(dstRaw conversion.Hub) error { } func (dst *KubeadmControlPlaneTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmControlPlaneTemplate) + src := srcRaw.(*controlplanev1.KubeadmControlPlaneTemplate) if err := Convert_v1beta1_KubeadmControlPlaneTemplate_To_v1alpha4_KubeadmControlPlaneTemplate(src, dst, nil); err != nil { return err @@ -97,35 +97,35 @@ func (dst *KubeadmControlPlaneTemplate) ConvertFrom(srcRaw conversion.Hub) error } func (src *KubeadmControlPlaneTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.KubeadmControlPlaneTemplateList) + dst := dstRaw.(*controlplanev1.KubeadmControlPlaneTemplateList) return Convert_v1alpha4_KubeadmControlPlaneTemplateList_To_v1beta1_KubeadmControlPlaneTemplateList(src, dst, nil) } func (dst *KubeadmControlPlaneTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.KubeadmControlPlaneTemplateList) + src := srcRaw.(*controlplanev1.KubeadmControlPlaneTemplateList) return Convert_v1beta1_KubeadmControlPlaneTemplateList_To_v1alpha4_KubeadmControlPlaneTemplateList(src, dst, nil) } -func Convert_v1alpha4_KubeadmControlPlaneSpec_To_v1beta1_KubeadmControlPlaneTemplateResourceSpec(in *KubeadmControlPlaneSpec, out *v1beta1.KubeadmControlPlaneTemplateResourceSpec, s apiconversion.Scope) error { - out.MachineTemplate = &v1beta1.KubeadmControlPlaneTemplateMachineTemplate{ +func Convert_v1alpha4_KubeadmControlPlaneSpec_To_v1beta1_KubeadmControlPlaneTemplateResourceSpec(in *KubeadmControlPlaneSpec, out *controlplanev1.KubeadmControlPlaneTemplateResourceSpec, s apiconversion.Scope) error { + out.MachineTemplate = &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ 
NodeDrainTimeout: in.MachineTemplate.NodeDrainTimeout, } - if err := kubeadmbootstrapv1alpha4.Convert_v1alpha4_KubeadmConfigSpec_To_v1beta1_KubeadmConfigSpec(&in.KubeadmConfigSpec, &out.KubeadmConfigSpec, s); err != nil { + if err := bootstrapv1alpha4.Convert_v1alpha4_KubeadmConfigSpec_To_v1beta1_KubeadmConfigSpec(&in.KubeadmConfigSpec, &out.KubeadmConfigSpec, s); err != nil { return err } out.RolloutAfter = in.RolloutAfter if in.RolloutStrategy != nil { - out.RolloutStrategy = &v1beta1.RolloutStrategy{} + out.RolloutStrategy = &controlplanev1.RolloutStrategy{} if len(in.RolloutStrategy.Type) > 0 { - out.RolloutStrategy.Type = v1beta1.RolloutStrategyType(in.RolloutStrategy.Type) + out.RolloutStrategy.Type = controlplanev1.RolloutStrategyType(in.RolloutStrategy.Type) } if in.RolloutStrategy.RollingUpdate != nil { - out.RolloutStrategy.RollingUpdate = &v1beta1.RollingUpdate{} + out.RolloutStrategy.RollingUpdate = &controlplanev1.RollingUpdate{} if in.RolloutStrategy.RollingUpdate.MaxSurge != nil { out.RolloutStrategy.RollingUpdate.MaxSurge = in.RolloutStrategy.RollingUpdate.MaxSurge @@ -136,12 +136,12 @@ func Convert_v1alpha4_KubeadmControlPlaneSpec_To_v1beta1_KubeadmControlPlaneTemp return nil } -func Convert_v1beta1_KubeadmControlPlaneTemplateResourceSpec_To_v1alpha4_KubeadmControlPlaneSpec(in *v1beta1.KubeadmControlPlaneTemplateResourceSpec, out *KubeadmControlPlaneSpec, s apiconversion.Scope) error { +func Convert_v1beta1_KubeadmControlPlaneTemplateResourceSpec_To_v1alpha4_KubeadmControlPlaneSpec(in *controlplanev1.KubeadmControlPlaneTemplateResourceSpec, out *KubeadmControlPlaneSpec, s apiconversion.Scope) error { if in.MachineTemplate != nil { out.MachineTemplate.NodeDrainTimeout = in.MachineTemplate.NodeDrainTimeout } - if err := kubeadmbootstrapv1alpha4.Convert_v1beta1_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(&in.KubeadmConfigSpec, &out.KubeadmConfigSpec, s); err != nil { + if err := 
bootstrapv1alpha4.Convert_v1beta1_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(&in.KubeadmConfigSpec, &out.KubeadmConfigSpec, s); err != nil { return err } diff --git a/controlplane/kubeadm/api/v1alpha4/conversion_test.go b/controlplane/kubeadm/api/v1alpha4/conversion_test.go index 2821468c59ab..40e2821d306b 100644 --- a/controlplane/kubeadm/api/v1alpha4/conversion_test.go +++ b/controlplane/kubeadm/api/v1alpha4/conversion_test.go @@ -25,10 +25,10 @@ import ( runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" - cabpkv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstreamv1beta1" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) @@ -39,13 +39,13 @@ const ( func TestFuzzyConversion(t *testing.T) { t.Run("for KubeadmControlPlane", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.KubeadmControlPlane{}, + Hub: &controlplanev1.KubeadmControlPlane{}, Spoke: &KubeadmControlPlane{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for KubeadmControlPlaneTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.KubeadmControlPlaneTemplate{}, + Hub: &controlplanev1.KubeadmControlPlaneTemplate{}, Spoke: &KubeadmControlPlaneTemplate{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) @@ -76,7 +76,7 @@ func kubeadmBootstrapTokenStringFuzzer(in *upstreamv1beta1.BootstrapTokenString, in.Secret = fakeSecret } -func cabpkBootstrapTokenStringFuzzer(in *cabpkv1.BootstrapTokenString, c fuzz.Continue) { +func 
cabpkBootstrapTokenStringFuzzer(in *bootstrapv1.BootstrapTokenString, c fuzz.Continue) { in.ID = fakeID in.Secret = fakeSecret } @@ -88,7 +88,7 @@ func dnsFuzzer(obj *upstreamv1beta1.DNS, c fuzz.Continue) { obj.Type = "" } -func kubeadmBootstrapTokenStringFuzzerV1Alpha4(in *cabpkv1alpha4.BootstrapTokenString, c fuzz.Continue) { +func kubeadmBootstrapTokenStringFuzzerV1Alpha4(in *bootstrapv1alpha4.BootstrapTokenString, c fuzz.Continue) { in.ID = fakeID in.Secret = fakeSecret } diff --git a/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go index c6339d8f43b3..f909d4c6db9f 100644 --- a/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1alpha4/kubeadm_control_plane_types.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" + bootstrapv1alpha4 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/errors" ) @@ -68,7 +68,7 @@ type KubeadmControlPlaneSpec struct { // KubeadmConfigSpec is a KubeadmConfigSpec // to use for initializing and joining machines to the control plane. - KubeadmConfigSpec cabpkv1.KubeadmConfigSpec `json:"kubeadmConfigSpec"` + KubeadmConfigSpec bootstrapv1alpha4.KubeadmConfigSpec `json:"kubeadmConfigSpec"` // RolloutAfter is a field to indicate a rollout should be performed // after the specified time even if no changes have been made to the @@ -90,7 +90,7 @@ type KubeadmControlPlaneMachineTemplate struct { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1alpha4.ObjectMeta `json:"metadata,omitempty"` // InfrastructureRef is a required reference to a custom resource // offered by an infrastructure provider. @@ -194,7 +194,7 @@ type KubeadmControlPlaneStatus struct { // Conditions defines current service state of the KubeadmControlPlane. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -220,12 +220,12 @@ type KubeadmControlPlane struct { } // GetConditions returns the set of conditions for this object. -func (in *KubeadmControlPlane) GetConditions() clusterv1.Conditions { +func (in *KubeadmControlPlane) GetConditions() clusterv1alpha4.Conditions { return in.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (in *KubeadmControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (in *KubeadmControlPlane) SetConditions(conditions clusterv1alpha4.Conditions) { in.Status.Conditions = conditions } diff --git a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go index 641d1dccd579..e7e41c19136f 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/errors" ) @@ -68,7 +68,7 @@ type KubeadmControlPlaneSpec struct { // KubeadmConfigSpec is a KubeadmConfigSpec // to use for initializing and joining machines to the control plane. - KubeadmConfigSpec cabpkv1.KubeadmConfigSpec `json:"kubeadmConfigSpec"` + KubeadmConfigSpec bootstrapv1.KubeadmConfigSpec `json:"kubeadmConfigSpec"` // RolloutAfter is a field to indicate a rollout should be performed // after the specified time even if no changes have been made to the diff --git a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook.go b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook.go index 053c6104eb54..7fda94cf498d 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook.go +++ b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_webhook.go @@ -32,7 +32,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util/container" "sigs.k8s.io/cluster-api/util/version" ) @@ -347,7 +347,7 @@ func validateRolloutStrategy(rolloutStrategy 
*RolloutStrategy, replicas *int32, return allErrs } -func validateClusterConfiguration(newClusterConfiguration, oldClusterConfiguration *cabpkv1.ClusterConfiguration, pathPrefix *field.Path) field.ErrorList { +func validateClusterConfiguration(newClusterConfiguration, oldClusterConfiguration *bootstrapv1.ClusterConfiguration, pathPrefix *field.Path) field.ErrorList { allErrs := field.ErrorList{} if newClusterConfiguration == nil { diff --git a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go b/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go index 85d66e4f90a3..e5e3e15f2862 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go +++ b/controlplane/kubeadm/api/v1beta1/kubeadmcontrolplanetemplate_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" ) // KubeadmControlPlaneTemplateSpec defines the desired state of KubeadmControlPlaneTemplate. @@ -71,7 +71,7 @@ type KubeadmControlPlaneTemplateResourceSpec struct { // KubeadmConfigSpec is a KubeadmConfigSpec // to use for initializing and joining machines to the control plane. 
- KubeadmConfigSpec cabpkv1.KubeadmConfigSpec `json:"kubeadmConfigSpec"` + KubeadmConfigSpec bootstrapv1.KubeadmConfigSpec `json:"kubeadmConfigSpec"` // RolloutAfter is a field to indicate a rollout should be performed // after the specified time even if no changes have been made to the diff --git a/controlplane/kubeadm/internal/webhooks/scale.go b/controlplane/kubeadm/internal/webhooks/scale.go index 7dcf0efcc2ef..506b398afaff 100644 --- a/controlplane/kubeadm/internal/webhooks/scale.go +++ b/controlplane/kubeadm/internal/webhooks/scale.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" ) func (v *ScaleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { @@ -55,7 +55,7 @@ func (v *ScaleValidator) Handle(ctx context.Context, req admission.Request) admi return admission.Errored(http.StatusBadRequest, errors.Wrapf(err, "failed to decode Scale resource")) } - kcp := &v1beta1.KubeadmControlPlane{} + kcp := &controlplanev1.KubeadmControlPlane{} kcpKey := types.NamespacedName{Namespace: scale.ObjectMeta.Namespace, Name: scale.ObjectMeta.Name} if err = v.Client.Get(ctx, kcpKey, kcp); err != nil { return admission.Errored(http.StatusInternalServerError, errors.Wrapf(err, "failed to get KubeadmControlPlane %s/%s", scale.ObjectMeta.Namespace, scale.ObjectMeta.Name)) diff --git a/controlplane/kubeadm/internal/webhooks/scale_test.go b/controlplane/kubeadm/internal/webhooks/scale_test.go index 91919d58ec77..d8308366ba2a 100644 --- a/controlplane/kubeadm/internal/webhooks/scale_test.go +++ b/controlplane/kubeadm/internal/webhooks/scale_test.go @@ -33,12 +33,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" 
+ controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" ) func init() { scheme = runtime.NewScheme() - _ = kcpv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) _ = admissionv1.AddToScheme(scheme) } @@ -47,13 +47,13 @@ var ( ) func TestKubeadmControlPlaneValidateScale(t *testing.T) { - kcpManagedEtcd := &kcpv1.KubeadmControlPlane{ + kcpManagedEtcd := &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "kcp-managed-etcd", Namespace: "foo", }, - Spec: kcpv1.KubeadmControlPlaneSpec{ - MachineTemplate: kcpv1.KubeadmControlPlaneMachineTemplate{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "test/v1alpha1", Kind: "UnknownInfraMachine", @@ -63,9 +63,9 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, }, Replicas: pointer.Int32Ptr(1), - RolloutStrategy: &kcpv1.RolloutStrategy{ - Type: kcpv1.RollingUpdateStrategyType, - RollingUpdate: &kcpv1.RollingUpdate{ + RolloutStrategy: &controlplanev1.RolloutStrategy{ + Type: controlplanev1.RollingUpdateStrategyType, + RollingUpdate: &controlplanev1.RollingUpdate{ MaxSurge: &intstr.IntOrString{ IntVal: 1, }, diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go index d6c848ddc70e..d1035302e5c9 100644 --- a/controlplane/kubeadm/internal/workload_cluster_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_test.go @@ -34,7 +34,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util/yaml" ) @@ -46,42 +46,42 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { expectImage 
string clientGet map[string]interface{} patchErr error - KCP *v1beta1.KubeadmControlPlane + KCP *controlplanev1.KubeadmControlPlane }{ { name: "succeeds if patch correctly", ds: newKubeProxyDS(), expectErr: false, expectImage: "k8s.gcr.io/kube-proxy:v1.16.3", - KCP: &v1beta1.KubeadmControlPlane{Spec: v1beta1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, + KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, }, { name: "returns error if image in kube-proxy ds was in digest format", ds: newKubeProxyDSWithImage("k8s.gcr.io/kube-proxy@sha256:47bfd"), expectErr: true, expectImage: "k8s.gcr.io/kube-proxy@sha256:47bfd", - KCP: &v1beta1.KubeadmControlPlane{Spec: v1beta1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, + KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, }, { name: "expects OCI compatible format of tag", ds: newKubeProxyDS(), expectErr: false, expectImage: "k8s.gcr.io/kube-proxy:v1.16.3_build1", - KCP: &v1beta1.KubeadmControlPlane{Spec: v1beta1.KubeadmControlPlaneSpec{Version: "v1.16.3+build1"}}, + KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3+build1"}}, }, { name: "returns error if image in kube-proxy ds was in wrong format", ds: newKubeProxyDSWithImage(""), expectErr: true, - KCP: &v1beta1.KubeadmControlPlane{Spec: v1beta1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, + KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}}, }, { name: "updates image repository if one has been set on the control plane", ds: newKubeProxyDS(), expectErr: false, expectImage: "foo.bar.example/baz/qux/kube-proxy:v1.16.3", - KCP: &v1beta1.KubeadmControlPlane{ - Spec: v1beta1.KubeadmControlPlaneSpec{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.3", KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ 
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -95,8 +95,8 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { ds: newKubeProxyDS(), expectErr: false, expectImage: "k8s.gcr.io/kube-proxy:v1.16.3", - KCP: &v1beta1.KubeadmControlPlane{ - Spec: v1beta1.KubeadmControlPlaneSpec{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.3", KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -109,8 +109,8 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { name: "returns error if image repository is invalid", ds: newKubeProxyDS(), expectErr: true, - KCP: &v1beta1.KubeadmControlPlane{ - Spec: v1beta1.KubeadmControlPlaneSpec{ + KCP: &controlplanev1.KubeadmControlPlane{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.3", KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -124,13 +124,13 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { ds: newKubeProxyDSWithImage(""), // Using the same image name that would otherwise lead to an error expectErr: false, expectImage: "", - KCP: &v1beta1.KubeadmControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - v1beta1.SkipKubeProxyAnnotation: "", + controlplanev1.SkipKubeProxyAnnotation: "", }, }, - Spec: v1beta1.KubeadmControlPlaneSpec{ + Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.3", }}, }, diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index eeae736663d0..a3f8858009af 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -42,11 +42,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmbootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" 
"sigs.k8s.io/cluster-api/controllers/remote" - kcpv1alpha3 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" - kcpv1alpha4 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" - kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1alpha3 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" + controlplanev1alpha4 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" kubeadmcontrolplanecontrollers "sigs.k8s.io/cluster-api/controlplane/kubeadm/controllers" kcpwebhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/webhooks" "sigs.k8s.io/cluster-api/feature" @@ -63,10 +63,10 @@ func init() { _ = clientgoscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) - _ = kcpv1alpha3.AddToScheme(scheme) - _ = kcpv1alpha4.AddToScheme(scheme) - _ = kcpv1.AddToScheme(scheme) - _ = kubeadmbootstrapv1.AddToScheme(scheme) + _ = controlplanev1alpha3.AddToScheme(scheme) + _ = controlplanev1alpha4.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) _ = apiextensionsv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } @@ -244,7 +244,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { } func setupWebhooks(mgr ctrl.Manager) { - if err := (&kcpv1.KubeadmControlPlane{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&controlplanev1.KubeadmControlPlane{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmControlPlane") os.Exit(1) } @@ -256,7 +256,7 @@ func setupWebhooks(mgr ctrl.Manager) { os.Exit(1) } - if err := (&kcpv1.KubeadmControlPlaneTemplate{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&controlplanev1.KubeadmControlPlaneTemplate{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "KubeadmControlPlaneTemplate") os.Exit(1) } diff --git 
a/exp/addons/api/v1alpha3/clusterresourceset_types.go b/exp/addons/api/v1alpha3/clusterresourceset_types.go index 54b7fc2029c5..f5250ba2f387 100644 --- a/exp/addons/api/v1alpha3/clusterresourceset_types.go +++ b/exp/addons/api/v1alpha3/clusterresourceset_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" ) const ( @@ -95,18 +95,18 @@ type ClusterResourceSetStatus struct { // Conditions defines current state of the ClusterResourceSet. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"` } // ANCHOR_END: ClusterResourceSetStatus // GetConditions returns the set of conditions for this object. -func (m *ClusterResourceSet) GetConditions() clusterv1.Conditions { +func (m *ClusterResourceSet) GetConditions() clusterv1alpha3.Conditions { return m.Status.Conditions } // SetConditions sets the conditions on this object. -func (m *ClusterResourceSet) SetConditions(conditions clusterv1.Conditions) { +func (m *ClusterResourceSet) SetConditions(conditions clusterv1alpha3.Conditions) { m.Status.Conditions = conditions } diff --git a/exp/addons/api/v1alpha3/condition_consts.go b/exp/addons/api/v1alpha3/condition_consts.go index fe9115d61e41..6efa96bce80a 100644 --- a/exp/addons/api/v1alpha3/condition_consts.go +++ b/exp/addons/api/v1alpha3/condition_consts.go @@ -16,14 +16,14 @@ limitations under the License. package v1alpha3 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" // Conditions and condition Reasons for the ClusterResourceSet object. const ( // ResourcesAppliedCondition documents that all resources in the ClusterResourceSet object are applied to // all matching clusters. 
This indicates all resources exist, and no errors during applying them to all clusters. - ResourcesAppliedCondition clusterv1.ConditionType = "ResourcesApplied" + ResourcesAppliedCondition clusterv1alpha3.ConditionType = "ResourcesApplied" // RemoteClusterClientFailedReason (Severity=Error) documents failure during getting the remote cluster client. RemoteClusterClientFailedReason = "RemoteClusterClientFailed" diff --git a/exp/addons/api/v1alpha3/conversion.go b/exp/addons/api/v1alpha3/conversion.go index 1f498c1d770a..25cd0b8b5db1 100644 --- a/exp/addons/api/v1alpha3/conversion.go +++ b/exp/addons/api/v1alpha3/conversion.go @@ -19,53 +19,53 @@ package v1alpha3 import ( "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" ) func (src *ClusterResourceSet) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterResourceSet) + dst := dstRaw.(*addonsv1.ClusterResourceSet) return Convert_v1alpha3_ClusterResourceSet_To_v1beta1_ClusterResourceSet(src, dst, nil) } func (dst *ClusterResourceSet) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSet) + src := srcRaw.(*addonsv1.ClusterResourceSet) return Convert_v1beta1_ClusterResourceSet_To_v1alpha3_ClusterResourceSet(src, dst, nil) } func (src *ClusterResourceSetList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterResourceSetList) + dst := dstRaw.(*addonsv1.ClusterResourceSetList) return Convert_v1alpha3_ClusterResourceSetList_To_v1beta1_ClusterResourceSetList(src, dst, nil) } func (dst *ClusterResourceSetList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSetList) + src := srcRaw.(*addonsv1.ClusterResourceSetList) return Convert_v1beta1_ClusterResourceSetList_To_v1alpha3_ClusterResourceSetList(src, dst, nil) } func (src *ClusterResourceSetBinding) ConvertTo(dstRaw conversion.Hub) error { - dst := 
dstRaw.(*v1beta1.ClusterResourceSetBinding) + dst := dstRaw.(*addonsv1.ClusterResourceSetBinding) return Convert_v1alpha3_ClusterResourceSetBinding_To_v1beta1_ClusterResourceSetBinding(src, dst, nil) } func (dst *ClusterResourceSetBinding) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSetBinding) + src := srcRaw.(*addonsv1.ClusterResourceSetBinding) return Convert_v1beta1_ClusterResourceSetBinding_To_v1alpha3_ClusterResourceSetBinding(src, dst, nil) } func (src *ClusterResourceSetBindingList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterResourceSetBindingList) + dst := dstRaw.(*addonsv1.ClusterResourceSetBindingList) return Convert_v1alpha3_ClusterResourceSetBindingList_To_v1beta1_ClusterResourceSetBindingList(src, dst, nil) } func (dst *ClusterResourceSetBindingList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSetBindingList) + src := srcRaw.(*addonsv1.ClusterResourceSetBindingList) return Convert_v1beta1_ClusterResourceSetBindingList_To_v1alpha3_ClusterResourceSetBindingList(src, dst, nil) } diff --git a/exp/addons/api/v1alpha3/conversion_test.go b/exp/addons/api/v1alpha3/conversion_test.go index f9a77b0eb2aa..50cf6d755d5b 100644 --- a/exp/addons/api/v1alpha3/conversion_test.go +++ b/exp/addons/api/v1alpha3/conversion_test.go @@ -19,17 +19,17 @@ package v1alpha3 import ( "testing" - clusterv1addons "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for ClusterResourceSet", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &clusterv1addons.ClusterResourceSet{}, + Hub: &addonsv1.ClusterResourceSet{}, Spoke: &ClusterResourceSet{}, })) t.Run("for ClusterResourceSetBinding", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: 
&clusterv1addons.ClusterResourceSetBinding{}, + Hub: &addonsv1.ClusterResourceSetBinding{}, Spoke: &ClusterResourceSetBinding{}, })) } diff --git a/exp/addons/api/v1alpha4/clusterresourceset_types.go b/exp/addons/api/v1alpha4/clusterresourceset_types.go index fe2b75e433f9..d632ea80755f 100644 --- a/exp/addons/api/v1alpha4/clusterresourceset_types.go +++ b/exp/addons/api/v1alpha4/clusterresourceset_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" ) const ( @@ -96,18 +96,18 @@ type ClusterResourceSetStatus struct { // Conditions defines current state of the ClusterResourceSet. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"` } // ANCHOR_END: ClusterResourceSetStatus // GetConditions returns the set of conditions for this object. -func (m *ClusterResourceSet) GetConditions() clusterv1.Conditions { +func (m *ClusterResourceSet) GetConditions() clusterv1alpha4.Conditions { return m.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (m *ClusterResourceSet) SetConditions(conditions clusterv1.Conditions) { +func (m *ClusterResourceSet) SetConditions(conditions clusterv1alpha4.Conditions) { m.Status.Conditions = conditions } diff --git a/exp/addons/api/v1alpha4/conversion.go b/exp/addons/api/v1alpha4/conversion.go index 3f51cd44f4b0..1de13aac78c0 100644 --- a/exp/addons/api/v1alpha4/conversion.go +++ b/exp/addons/api/v1alpha4/conversion.go @@ -19,53 +19,53 @@ package v1alpha4 import ( "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" ) func (src *ClusterResourceSet) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterResourceSet) + dst := dstRaw.(*addonsv1.ClusterResourceSet) return Convert_v1alpha4_ClusterResourceSet_To_v1beta1_ClusterResourceSet(src, dst, nil) } func (dst *ClusterResourceSet) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSet) + src := srcRaw.(*addonsv1.ClusterResourceSet) return Convert_v1beta1_ClusterResourceSet_To_v1alpha4_ClusterResourceSet(src, dst, nil) } func (src *ClusterResourceSetList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterResourceSetList) + dst := dstRaw.(*addonsv1.ClusterResourceSetList) return Convert_v1alpha4_ClusterResourceSetList_To_v1beta1_ClusterResourceSetList(src, dst, nil) } func (dst *ClusterResourceSetList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSetList) + src := srcRaw.(*addonsv1.ClusterResourceSetList) return Convert_v1beta1_ClusterResourceSetList_To_v1alpha4_ClusterResourceSetList(src, dst, nil) } func (src *ClusterResourceSetBinding) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterResourceSetBinding) + dst := dstRaw.(*addonsv1.ClusterResourceSetBinding) return Convert_v1alpha4_ClusterResourceSetBinding_To_v1beta1_ClusterResourceSetBinding(src, dst, nil) } func (dst 
*ClusterResourceSetBinding) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSetBinding) + src := srcRaw.(*addonsv1.ClusterResourceSetBinding) return Convert_v1beta1_ClusterResourceSetBinding_To_v1alpha4_ClusterResourceSetBinding(src, dst, nil) } func (src *ClusterResourceSetBindingList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.ClusterResourceSetBindingList) + dst := dstRaw.(*addonsv1.ClusterResourceSetBindingList) return Convert_v1alpha4_ClusterResourceSetBindingList_To_v1beta1_ClusterResourceSetBindingList(src, dst, nil) } func (dst *ClusterResourceSetBindingList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.ClusterResourceSetBindingList) + src := srcRaw.(*addonsv1.ClusterResourceSetBindingList) return Convert_v1beta1_ClusterResourceSetBindingList_To_v1alpha4_ClusterResourceSetBindingList(src, dst, nil) } diff --git a/exp/addons/api/v1alpha4/conversion_test.go b/exp/addons/api/v1alpha4/conversion_test.go index 0ca8b1d18fe0..5c8eec356880 100644 --- a/exp/addons/api/v1alpha4/conversion_test.go +++ b/exp/addons/api/v1alpha4/conversion_test.go @@ -19,17 +19,17 @@ package v1alpha4 import ( "testing" - clusterv1addons "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for ClusterResourceSet", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &clusterv1addons.ClusterResourceSet{}, + Hub: &addonsv1.ClusterResourceSet{}, Spoke: &ClusterResourceSet{}, })) t.Run("for ClusterResourceSetBinding", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &clusterv1addons.ClusterResourceSetBinding{}, + Hub: &addonsv1.ClusterResourceSetBinding{}, Spoke: &ClusterResourceSetBinding{}, })) } diff --git a/exp/addons/internal/controllers/suite_test.go b/exp/addons/internal/controllers/suite_test.go index 
a49c0f52fb11..6053838d220c 100644 --- a/exp/addons/internal/controllers/suite_test.go +++ b/exp/addons/internal/controllers/suite_test.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/cluster-api/api/v1beta1/index" "sigs.k8s.io/cluster-api/controllers/remote" - v1alpha4 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" "sigs.k8s.io/cluster-api/internal/envtest" ) @@ -73,7 +73,7 @@ func TestMain(m *testing.M) { ManagerUncachedObjs: []client.Object{ &corev1.ConfigMap{}, &corev1.Secret{}, - &v1alpha4.ClusterResourceSetBinding{}, + &addonsv1.ClusterResourceSetBinding{}, }, SetupIndexes: setupIndexes, SetupReconcilers: setupReconcilers, diff --git a/exp/api/v1alpha3/condition_consts.go b/exp/api/v1alpha3/condition_consts.go index 95e7df26a797..8465606a672e 100644 --- a/exp/api/v1alpha3/condition_consts.go +++ b/exp/api/v1alpha3/condition_consts.go @@ -16,13 +16,13 @@ limitations under the License. package v1alpha3 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" // Conditions and condition Reasons for the MachinePool object. const ( // ReplicasReadyCondition reports an aggregate of current status of the replicas controlled by the MachinePool. - ReplicasReadyCondition clusterv1.ConditionType = "ReplicasReady" + ReplicasReadyCondition clusterv1alpha3.ConditionType = "ReplicasReady" // WaitingForReplicasReadyReason (Severity=Info) documents a machinepool waiting for the required replicas // to be ready. 
diff --git a/exp/api/v1alpha3/conversion.go b/exp/api/v1alpha3/conversion.go index a57405232ca2..47cedb064ccf 100644 --- a/exp/api/v1alpha3/conversion.go +++ b/exp/api/v1alpha3/conversion.go @@ -21,15 +21,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/exp/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ) // Convert_v1alpha3_MachinePoolSpec_To_v1beta1_MachinePoolSpec is an autogenerated conversion function. -func Convert_v1alpha3_MachinePoolSpec_To_v1beta1_MachinePoolSpec(in *MachinePoolSpec, out *v1beta1.MachinePoolSpec, s apimachineryconversion.Scope) error { +func Convert_v1alpha3_MachinePoolSpec_To_v1beta1_MachinePoolSpec(in *MachinePoolSpec, out *expv1.MachinePoolSpec, s apimachineryconversion.Scope) error { return autoConvert_v1alpha3_MachinePoolSpec_To_v1beta1_MachinePoolSpec(in, out, s) } -func Convert_v1alpha3_MachinePool_To_v1beta1_MachinePool(in *MachinePool, out *v1beta1.MachinePool, s apimachineryconversion.Scope) error { +func Convert_v1alpha3_MachinePool_To_v1beta1_MachinePool(in *MachinePool, out *expv1.MachinePool, s apimachineryconversion.Scope) error { if err := autoConvert_v1alpha3_MachinePool_To_v1beta1_MachinePool(in, out, s); err != nil { return err } @@ -37,20 +37,20 @@ func Convert_v1alpha3_MachinePool_To_v1beta1_MachinePool(in *MachinePool, out *v // Replace v1alpha3 finalizer to allow old MachinePools to get deleted. 
if controllerutil.ContainsFinalizer(out, MachinePoolFinalizer) { controllerutil.RemoveFinalizer(out, MachinePoolFinalizer) - controllerutil.AddFinalizer(out, v1beta1.MachinePoolFinalizer) + controllerutil.AddFinalizer(out, expv1.MachinePoolFinalizer) } return nil } -func Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(in *v1beta1.MachinePool, out *MachinePool, s apimachineryconversion.Scope) error { +func Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(in *expv1.MachinePool, out *MachinePool, s apimachineryconversion.Scope) error { if err := autoConvert_v1beta1_MachinePool_To_v1alpha3_MachinePool(in, out, s); err != nil { return err } // Replace v1beta1 finalizer to allow old MachinePools to get deleted. - if controllerutil.ContainsFinalizer(out, v1beta1.MachinePoolFinalizer) { - controllerutil.RemoveFinalizer(out, v1beta1.MachinePoolFinalizer) + if controllerutil.ContainsFinalizer(out, expv1.MachinePoolFinalizer) { + controllerutil.RemoveFinalizer(out, expv1.MachinePoolFinalizer) controllerutil.AddFinalizer(out, MachinePoolFinalizer) } @@ -58,25 +58,25 @@ func Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(in *v1beta1.MachinePool } func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachinePool) + dst := dstRaw.(*expv1.MachinePool) return Convert_v1alpha3_MachinePool_To_v1beta1_MachinePool(src, dst, nil) } func (dst *MachinePool) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachinePool) + src := srcRaw.(*expv1.MachinePool) return Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(src, dst, nil) } func (src *MachinePoolList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachinePoolList) + dst := dstRaw.(*expv1.MachinePoolList) return Convert_v1alpha3_MachinePoolList_To_v1beta1_MachinePoolList(src, dst, nil) } func (dst *MachinePoolList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachinePoolList) + src := srcRaw.(*expv1.MachinePoolList) return 
Convert_v1beta1_MachinePoolList_To_v1alpha3_MachinePoolList(src, dst, nil) } diff --git a/exp/api/v1alpha3/conversion_test.go b/exp/api/v1alpha3/conversion_test.go index 0241814c9220..dae60f05c70b 100644 --- a/exp/api/v1alpha3/conversion_test.go +++ b/exp/api/v1alpha3/conversion_test.go @@ -23,14 +23,14 @@ import ( "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for MachinePool", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &clusterv1exp.MachinePool{}, + Hub: &expv1.MachinePool{}, Spoke: &MachinePool{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) @@ -44,14 +44,14 @@ func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } -func BootstrapFuzzer(in *clusterv1.Bootstrap, c fuzz.Continue) { +func BootstrapFuzzer(in *clusterv1alpha3.Bootstrap, c fuzz.Continue) { c.FuzzNoCustom(in) // Bootstrap.Data has been removed in v1alpha4, so setting it to nil in order to avoid v1alpha3 --> --> v1alpha3 round trip errors. 
in.Data = nil } -func ObjectMetaFuzzer(in *clusterv1.ObjectMeta, c fuzz.Continue) { +func ObjectMetaFuzzer(in *clusterv1alpha3.ObjectMeta, c fuzz.Continue) { c.FuzzNoCustom(in) // These fields have been removed in v1beta1 diff --git a/exp/api/v1alpha3/machinepool_types.go b/exp/api/v1alpha3/machinepool_types.go index b014cb7bbed3..4428ea7e38d3 100644 --- a/exp/api/v1alpha3/machinepool_types.go +++ b/exp/api/v1alpha3/machinepool_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" capierrors "sigs.k8s.io/cluster-api/errors" ) @@ -42,12 +42,12 @@ type MachinePoolSpec struct { Replicas *int32 `json:"replicas,omitempty"` // Template describes the machines that will be created. - Template clusterv1.MachineTemplateSpec `json:"template"` + Template clusterv1alpha3.MachineTemplateSpec `json:"template"` // The deployment strategy to use to replace existing machine instances with // new ones. // +optional - Strategy *clusterv1.MachineDeploymentStrategy `json:"strategy,omitempty"` + Strategy *clusterv1alpha3.MachineDeploymentStrategy `json:"strategy,omitempty"` // Minimum number of seconds for which a newly created machine instances should // be ready. @@ -124,7 +124,7 @@ type MachinePoolStatus struct { // Conditions define the current service state of the MachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"` } // ANCHOR_END: MachinePoolStatus @@ -222,12 +222,12 @@ type MachinePool struct { } // GetConditions returns the set of conditions for this object. -func (m *MachinePool) GetConditions() clusterv1.Conditions { +func (m *MachinePool) GetConditions() clusterv1alpha3.Conditions { return m.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (m *MachinePool) SetConditions(conditions clusterv1.Conditions) { +func (m *MachinePool) SetConditions(conditions clusterv1alpha3.Conditions) { m.Status.Conditions = conditions } diff --git a/exp/api/v1alpha4/condition_consts.go b/exp/api/v1alpha4/condition_consts.go index e8b32a5a7003..813e49176030 100644 --- a/exp/api/v1alpha4/condition_consts.go +++ b/exp/api/v1alpha4/condition_consts.go @@ -16,13 +16,13 @@ limitations under the License. package v1alpha4 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" +import clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" // Conditions and condition Reasons for the MachinePool object. const ( // ReplicasReadyCondition reports an aggregate of current status of the replicas controlled by the MachinePool. - ReplicasReadyCondition clusterv1.ConditionType = "ReplicasReady" + ReplicasReadyCondition clusterv1alpha4.ConditionType = "ReplicasReady" // WaitingForReplicasReadyReason (Severity=Info) documents a machinepool waiting for the required replicas // to be ready. 
diff --git a/exp/api/v1alpha4/conversion.go b/exp/api/v1alpha4/conversion.go index 03e5befd83dd..609d15cff4b5 100644 --- a/exp/api/v1alpha4/conversion.go +++ b/exp/api/v1alpha4/conversion.go @@ -19,29 +19,29 @@ package v1alpha4 import ( "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/exp/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ) func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachinePool) + dst := dstRaw.(*expv1.MachinePool) return Convert_v1alpha4_MachinePool_To_v1beta1_MachinePool(src, dst, nil) } func (dst *MachinePool) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachinePool) + src := srcRaw.(*expv1.MachinePool) return Convert_v1beta1_MachinePool_To_v1alpha4_MachinePool(src, dst, nil) } func (src *MachinePoolList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.MachinePoolList) + dst := dstRaw.(*expv1.MachinePoolList) return Convert_v1alpha4_MachinePoolList_To_v1beta1_MachinePoolList(src, dst, nil) } func (dst *MachinePoolList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.MachinePoolList) + src := srcRaw.(*expv1.MachinePoolList) return Convert_v1beta1_MachinePoolList_To_v1alpha4_MachinePoolList(src, dst, nil) } diff --git a/exp/api/v1alpha4/conversion_test.go b/exp/api/v1alpha4/conversion_test.go index 32e937b47931..bfba3b1fb598 100644 --- a/exp/api/v1alpha4/conversion_test.go +++ b/exp/api/v1alpha4/conversion_test.go @@ -21,13 +21,13 @@ import ( "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for MachinePool", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &clusterv1exp.MachinePool{}, + Hub: &expv1.MachinePool{}, Spoke: &MachinePool{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{}, 
})) diff --git a/exp/api/v1alpha4/machinepool_types.go b/exp/api/v1alpha4/machinepool_types.go index b4982af3ed23..320464e8dd21 100644 --- a/exp/api/v1alpha4/machinepool_types.go +++ b/exp/api/v1alpha4/machinepool_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" capierrors "sigs.k8s.io/cluster-api/errors" ) @@ -42,7 +42,7 @@ type MachinePoolSpec struct { Replicas *int32 `json:"replicas,omitempty"` // Template describes the machines that will be created. - Template clusterv1.MachineTemplateSpec `json:"template"` + Template clusterv1alpha4.MachineTemplateSpec `json:"template"` // Minimum number of seconds for which a newly created machine instances should // be ready. @@ -119,7 +119,7 @@ type MachinePoolStatus struct { // Conditions define the current service state of the MachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"` } // ANCHOR_END: MachinePoolStatus @@ -218,12 +218,12 @@ type MachinePool struct { } // GetConditions returns the set of conditions for this object. -func (m *MachinePool) GetConditions() clusterv1.Conditions { +func (m *MachinePool) GetConditions() clusterv1alpha4.Conditions { return m.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (m *MachinePool) SetConditions(conditions clusterv1.Conditions) {
+func (m *MachinePool) SetConditions(conditions clusterv1alpha4.Conditions) {
 	m.Status.Conditions = conditions
 }
diff --git a/exp/util/util.go b/exp/util/util.go
index 592a8ccd20ce..5ae1a13406af 100644
--- a/exp/util/util.go
+++ b/exp/util/util.go
@@ -28,11 +28,11 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
-	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 )
 
 // GetOwnerMachinePool returns the MachinePool objects owning the current resource.
-func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1exp.MachinePool, error) {
+func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expv1.MachinePool, error) {
 	for _, ref := range obj.OwnerReferences {
 		if ref.Kind != "MachinePool" {
 			continue
@@ -41,7 +41,7 @@ func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object
 		if err != nil {
 			return nil, errors.WithStack(err)
 		}
-		if gv.Group == clusterv1exp.GroupVersion.Group {
+		if gv.Group == expv1.GroupVersion.Group {
 			return GetMachinePoolByName(ctx, c, obj.Namespace, ref.Name)
 		}
 	}
@@ -49,8 +49,8 @@ func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object
 }
 
 // GetMachinePoolByName finds and returns a MachinePool object using the specified params.
-func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1exp.MachinePool, error) { - m := &clusterv1exp.MachinePool{} +func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expv1.MachinePool, error) { + m := &expv1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err @@ -64,7 +64,7 @@ func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Lo log = log.WithValues("machine-pool-to-infra-map-func", gvk.String()) return func(o client.Object) []reconcile.Request { log := log.WithValues("namespace", o.GetNamespace(), "name", o.GetName()) - m, ok := o.(*clusterv1exp.MachinePool) + m, ok := o.(*expv1.MachinePool) if !ok { log.V(4).Info("not a machine pool") return nil diff --git a/internal/envtest/environment.go b/internal/envtest/environment.go index 166e2b95f70c..16632f51080b 100644 --- a/internal/envtest/environment.go +++ b/internal/envtest/environment.go @@ -51,8 +51,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/cmd/clusterctl/log" - kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - addonv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/internal/builder" "sigs.k8s.io/cluster-api/util/kubeconfig" @@ -75,8 +75,8 @@ func init() { utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(bootstrapv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expv1.AddToScheme(scheme.Scheme)) - utilruntime.Must(addonv1.AddToScheme(scheme.Scheme)) - utilruntime.Must(kcpv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(addonsv1.AddToScheme(scheme.Scheme)) + 
utilruntime.Must(controlplanev1.AddToScheme(scheme.Scheme)) utilruntime.Must(admissionv1.AddToScheme(scheme.Scheme)) } @@ -244,10 +244,10 @@ func newEnvironment(uncachedObjs ...client.Object) *Environment { if err := (&bootstrapv1.KubeadmConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { klog.Fatalf("unable to create webhook: %+v", err) } - if err := (&kcpv1.KubeadmControlPlane{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&controlplanev1.KubeadmControlPlane{}).SetupWebhookWithManager(mgr); err != nil { klog.Fatalf("unable to create webhook: %+v", err) } - if err := (&addonv1.ClusterResourceSet{}).SetupWebhookWithManager(mgr); err != nil { + if err := (&addonsv1.ClusterResourceSet{}).SetupWebhookWithManager(mgr); err != nil { klog.Fatalf("unable to create webhook for crs: %+v", err) } if err := (&expv1.MachinePool{}).SetupWebhookWithManager(mgr); err != nil { diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index d5fd407a81b8..bfd0f0a1aac8 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -35,7 +35,7 @@ import ( "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1old "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" "sigs.k8s.io/cluster-api/test/e2e/internal/log" @@ -273,7 +273,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg By("Waiting for the machines to exists") Eventually(func() (int64, error) { var n int64 - machineList := &clusterv1old.MachineList{} + machineList := &clusterv1alpha3.MachineList{} if err := managementClusterProxy.GetClient().List(ctx, machineList, client.InNamespace(testNamespace.Name), client.MatchingLabels{clusterv1.ClusterLabelName: workLoadClusterName}); err == nil { for _, machine := range machineList.Items { if machine.Status.NodeRef != nil { @@ 
-347,7 +347,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg Client: managementClusterProxy.GetClient(), Namespace: testNamespace.Name, }, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...) - case discovery.ServerSupportsVersion(managementClusterProxy.GetClientSet().DiscoveryClient, clusterv1old.GroupVersion) == nil: + case discovery.ServerSupportsVersion(managementClusterProxy.GetClientSet().DiscoveryClient, clusterv1alpha3.GroupVersion) == nil: Byf("Deleting all clusters in namespace: %s in management cluster: %s", testNamespace.Name, managementClusterName) deleteAllClustersAndWaitOldAPI(ctx, framework.DeleteAllClustersAndWaitInput{ Client: managementClusterProxy.GetClient(), @@ -429,11 +429,11 @@ func deleteAllClustersAndWaitOldAPI(ctx context.Context, input framework.DeleteA } // getAllClustersByNamespaceOldAPI returns the list of Cluster objects in a namespace using the older API. -func getAllClustersByNamespaceOldAPI(ctx context.Context, input framework.GetAllClustersByNamespaceInput) []*clusterv1old.Cluster { - clusterList := &clusterv1old.ClusterList{} +func getAllClustersByNamespaceOldAPI(ctx context.Context, input framework.GetAllClustersByNamespaceInput) []*clusterv1alpha3.Cluster { + clusterList := &clusterv1alpha3.ClusterList{} Expect(input.Lister.List(ctx, clusterList, client.InNamespace(input.Namespace))).To(Succeed(), "Failed to list clusters in namespace %s", input.Namespace) - clusters := make([]*clusterv1old.Cluster, len(clusterList.Items)) + clusters := make([]*clusterv1alpha3.Cluster, len(clusterList.Items)) for i := range clusterList.Items { clusters[i] = &clusterList.Items[i] } @@ -443,7 +443,7 @@ func getAllClustersByNamespaceOldAPI(ctx context.Context, input framework.GetAll // deleteClusterOldAPIInput is the input for deleteClusterOldAPI. 
type deleteClusterOldAPIInput struct { Deleter framework.Deleter - Cluster *clusterv1old.Cluster + Cluster *clusterv1alpha3.Cluster } // deleteClusterOldAPI deletes the cluster and waits for everything the cluster owned to actually be gone using the older API. @@ -455,14 +455,14 @@ func deleteClusterOldAPI(ctx context.Context, input deleteClusterOldAPIInput) { // waitForClusterDeletedOldAPIInput is the input for waitForClusterDeletedOldAPI. type waitForClusterDeletedOldAPIInput struct { Getter framework.Getter - Cluster *clusterv1old.Cluster + Cluster *clusterv1alpha3.Cluster } // waitForClusterDeletedOldAPI waits until the cluster object has been deleted using the older API. func waitForClusterDeletedOldAPI(ctx context.Context, input waitForClusterDeletedOldAPIInput, intervals ...interface{}) { By(fmt.Sprintf("Waiting for cluster %s to be deleted", input.Cluster.GetName())) Eventually(func() bool { - cluster := &clusterv1old.Cluster{} + cluster := &clusterv1alpha3.Cluster{} key := client.ObjectKey{ Namespace: input.Cluster.GetNamespace(), Name: input.Cluster.GetName(), diff --git a/test/framework/clusterctl/clusterctl_helpers.go b/test/framework/clusterctl/clusterctl_helpers.go index 7917f11c7464..33bc100bec74 100644 --- a/test/framework/clusterctl/clusterctl_helpers.go +++ b/test/framework/clusterctl/clusterctl_helpers.go @@ -26,7 +26,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/internal/log" ) @@ -202,7 +202,7 @@ type ApplyClusterTemplateAndWaitResult struct { Cluster *clusterv1.Cluster ControlPlane *controlplanev1.KubeadmControlPlane MachineDeployments []*clusterv1.MachineDeployment - MachinePools []*clusterv1exp.MachinePool + 
MachinePools []*expv1.MachinePool } // ExpectedWorkerNodes returns the expected number of worker nodes that will diff --git a/test/framework/convenience.go b/test/framework/convenience.go index 482af807a25f..9632dfc158ce 100644 --- a/test/framework/convenience.go +++ b/test/framework/convenience.go @@ -26,7 +26,7 @@ import ( apiextensionsv1beta "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - clusterv1old "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -56,7 +56,7 @@ func TryAddDefaultSchemes(scheme *runtime.Scheme) { _ = addonsv1.AddToScheme(scheme) // Add the core CAPI v1alpha3 scheme. - _ = clusterv1old.AddToScheme(scheme) + _ = clusterv1alpha3.AddToScheme(scheme) // Add the kubeadm bootstrapper scheme. _ = bootstrapv1.AddToScheme(scheme) diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go index 6ad5bb5eeb31..03db3fd4d136 100644 --- a/test/framework/machinepool_helpers.go +++ b/test/framework/machinepool_helpers.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework/internal/log" "sigs.k8s.io/cluster-api/util/patch" ) @@ -41,16 +41,16 @@ type GetMachinePoolsByClusterInput struct { // GetMachinePoolsByCluster returns the MachinePools objects for a cluster. // Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so // it is necessary to ensure this is already happened before calling it. 
-func GetMachinePoolsByCluster(ctx context.Context, input GetMachinePoolsByClusterInput) []*clusterv1exp.MachinePool { +func GetMachinePoolsByCluster(ctx context.Context, input GetMachinePoolsByClusterInput) []*expv1.MachinePool { Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinePoolsByCluster") Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetMachinePoolsByCluster") Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetMachinePoolsByCluster") Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetMachinePoolsByCluster") - mpList := &clusterv1exp.MachinePoolList{} + mpList := &expv1.MachinePoolList{} Expect(input.Lister.List(ctx, mpList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list MachinePools object for Cluster %s/%s", input.Namespace, input.ClusterName) - mps := make([]*clusterv1exp.MachinePool, len(mpList.Items)) + mps := make([]*expv1.MachinePool, len(mpList.Items)) for i := range mpList.Items { mps[i] = &mpList.Items[i] } @@ -60,7 +60,7 @@ func GetMachinePoolsByCluster(ctx context.Context, input GetMachinePoolsByCluste // WaitForMachinePoolNodesToExistInput is the input for WaitForMachinePoolNodesToExist. type WaitForMachinePoolNodesToExistInput struct { Getter Getter - MachinePool *clusterv1exp.MachinePool + MachinePool *expv1.MachinePool } // WaitForMachinePoolNodesToExist waits until all nodes associated with a machine pool exist. @@ -92,7 +92,7 @@ type DiscoveryAndWaitForMachinePoolsInput struct { } // DiscoveryAndWaitForMachinePools discovers the MachinePools existing in a cluster and waits for them to be ready (all the machines provisioned). 
-func DiscoveryAndWaitForMachinePools(ctx context.Context, input DiscoveryAndWaitForMachinePoolsInput, intervals ...interface{}) []*clusterv1exp.MachinePool { +func DiscoveryAndWaitForMachinePools(ctx context.Context, input DiscoveryAndWaitForMachinePoolsInput, intervals ...interface{}) []*expv1.MachinePool { Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForMachinePools") Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForMachinePools") Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForMachinePools") @@ -115,7 +115,7 @@ type UpgradeMachinePoolAndWaitInput struct { ClusterProxy ClusterProxy Cluster *clusterv1.Cluster UpgradeVersion string - MachinePools []*clusterv1exp.MachinePool + MachinePools []*expv1.MachinePool WaitForMachinePoolToBeUpgraded []interface{} } @@ -155,7 +155,7 @@ type ScaleMachinePoolAndWaitInput struct { ClusterProxy ClusterProxy Cluster *clusterv1.Cluster Replicas int32 - MachinePools []*clusterv1exp.MachinePool + MachinePools []*expv1.MachinePool WaitForMachinePoolToScale []interface{} } @@ -191,7 +191,7 @@ type WaitForMachinePoolInstancesToBeUpgradedInput struct { Cluster *clusterv1.Cluster KubernetesUpgradeVersion string MachineCount int - MachinePool *clusterv1exp.MachinePool + MachinePool *expv1.MachinePool } // WaitForMachinePoolInstancesToBeUpgraded waits until all instances belonging to a MachinePool are upgraded to the correct kubernetes version. @@ -237,7 +237,7 @@ func WaitForMachinePoolInstancesToBeUpgraded(ctx context.Context, input WaitForM type GetMachinesPoolInstancesInput struct { WorkloadClusterGetter Getter Namespace string - MachinePool *clusterv1exp.MachinePool + MachinePool *expv1.MachinePool } // getMachinePoolInstanceVersions returns the Kubernetes versions of the machine pool instances. 
diff --git a/test/infrastructure/docker/api/v1alpha3/condition_consts.go b/test/infrastructure/docker/api/v1alpha3/condition_consts.go index 3da821ab34b3..a8b797cfe50d 100644 --- a/test/infrastructure/docker/api/v1alpha3/condition_consts.go +++ b/test/infrastructure/docker/api/v1alpha3/condition_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1alpha3 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" +import clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" // Conditions and condition Reasons for the DockerMachine object. @@ -27,7 +27,7 @@ const ( // NOTE: When the container provisioning starts the process completes almost immediately and within // the same reconciliation, so the user will always see a transition from Wait to Provisioned without // having evidence that the operation is started/is in progress. - ContainerProvisionedCondition clusterv1.ConditionType = "ContainerProvisioned" + ContainerProvisionedCondition clusterv1alpha3.ConditionType = "ContainerProvisioned" // WaitingForClusterInfrastructureReason (Severity=Info) documents a DockerMachine waiting for the cluster // infrastructure to be ready before starting to create the container that provides the DockerMachine @@ -50,7 +50,7 @@ const ( // // NOTE as a difference from other providers, container provisioning and bootstrap are directly managed // by the DockerMachine controller (not by cloud-init). - BootstrapExecSucceededCondition clusterv1.ConditionType = "BootstrapExecSucceeded" + BootstrapExecSucceededCondition clusterv1alpha3.ConditionType = "BootstrapExecSucceeded" // BootstrappingReason documents (Severity=Info) a DockerMachine currently executing the bootstrap // script that creates the Kubernetes node on the newly provisioned machine infrastructure. 
@@ -70,7 +70,7 @@ const ( // NOTE: When the load balancer provisioning starts the process completes almost immediately and within // the same reconciliation, so the user will always see a transition from no condition to available without // having evidence that the operation is started/is in progress. - LoadBalancerAvailableCondition clusterv1.ConditionType = "LoadBalancerAvailable" + LoadBalancerAvailableCondition clusterv1alpha3.ConditionType = "LoadBalancerAvailable" // LoadBalancerProvisioningFailedReason (Severity=Warning) documents a DockerCluster controller detecting // an error while provisioning the container that provides the cluster load balancer.; those kind of diff --git a/test/infrastructure/docker/api/v1alpha3/conversion.go b/test/infrastructure/docker/api/v1alpha3/conversion.go index 6a86157b8630..6bdb64825187 100644 --- a/test/infrastructure/docker/api/v1alpha3/conversion.go +++ b/test/infrastructure/docker/api/v1alpha3/conversion.go @@ -20,19 +20,19 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *DockerCluster) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerCluster) + dst := dstRaw.(*infrav1.DockerCluster) if err := Convert_v1alpha3_DockerCluster_To_v1beta1_DockerCluster(src, dst, nil); err != nil { return err } // Manually restore data. 
- restored := &v1beta1.DockerCluster{} + restored := &infrav1.DockerCluster{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -49,7 +49,7 @@ func (src *DockerCluster) ConvertTo(dstRaw conversion.Hub) error { } func (dst *DockerCluster) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerCluster) + src := srcRaw.(*infrav1.DockerCluster) if err := Convert_v1beta1_DockerCluster_To_v1alpha3_DockerCluster(src, dst, nil); err != nil { return err @@ -64,50 +64,50 @@ func (dst *DockerCluster) ConvertFrom(srcRaw conversion.Hub) error { } func (src *DockerClusterList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerClusterList) + dst := dstRaw.(*infrav1.DockerClusterList) return Convert_v1alpha3_DockerClusterList_To_v1beta1_DockerClusterList(src, dst, nil) } func (dst *DockerClusterList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerClusterList) + src := srcRaw.(*infrav1.DockerClusterList) return Convert_v1beta1_DockerClusterList_To_v1alpha3_DockerClusterList(src, dst, nil) } func (src *DockerMachine) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachine) + dst := dstRaw.(*infrav1.DockerMachine) return Convert_v1alpha3_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil) } func (dst *DockerMachine) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachine) + src := srcRaw.(*infrav1.DockerMachine) return Convert_v1beta1_DockerMachine_To_v1alpha3_DockerMachine(src, dst, nil) } func (src *DockerMachineList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachineList) + dst := dstRaw.(*infrav1.DockerMachineList) return Convert_v1alpha3_DockerMachineList_To_v1beta1_DockerMachineList(src, dst, nil) } func (dst *DockerMachineList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachineList) + src := srcRaw.(*infrav1.DockerMachineList) return 
Convert_v1beta1_DockerMachineList_To_v1alpha3_DockerMachineList(src, dst, nil) } func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachineTemplate) + dst := dstRaw.(*infrav1.DockerMachineTemplate) if err := Convert_v1alpha3_DockerMachineTemplate_To_v1beta1_DockerMachineTemplate(src, dst, nil); err != nil { return err } // Manually restore data. - restored := &v1beta1.DockerMachineTemplate{} + restored := &infrav1.DockerMachineTemplate{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -118,7 +118,7 @@ func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } func (dst *DockerMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachineTemplate) + src := srcRaw.(*infrav1.DockerMachineTemplate) if err := Convert_v1beta1_DockerMachineTemplate_To_v1alpha3_DockerMachineTemplate(src, dst, nil); err != nil { return err @@ -133,24 +133,24 @@ func (dst *DockerMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error { } func (src *DockerMachineTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachineTemplateList) + dst := dstRaw.(*infrav1.DockerMachineTemplateList) return Convert_v1alpha3_DockerMachineTemplateList_To_v1beta1_DockerMachineTemplateList(src, dst, nil) } func (dst *DockerMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachineTemplateList) + src := srcRaw.(*infrav1.DockerMachineTemplateList) return Convert_v1beta1_DockerMachineTemplateList_To_v1alpha3_DockerMachineTemplateList(src, dst, nil) } // Convert_v1beta1_DockerClusterSpec_To_v1alpha3_DockerClusterSpec is an autogenerated conversion function. 
-func Convert_v1beta1_DockerClusterSpec_To_v1alpha3_DockerClusterSpec(in *v1beta1.DockerClusterSpec, out *DockerClusterSpec, s apiconversion.Scope) error { +func Convert_v1beta1_DockerClusterSpec_To_v1alpha3_DockerClusterSpec(in *infrav1.DockerClusterSpec, out *DockerClusterSpec, s apiconversion.Scope) error { // DockerClusterSpec.LoadBalancer was added in v1alpha4, so automatic conversion is not possible return autoConvert_v1beta1_DockerClusterSpec_To_v1alpha3_DockerClusterSpec(in, out, s) } -func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemplateResource(in *v1beta1.DockerMachineTemplateResource, out *DockerMachineTemplateResource, s apiconversion.Scope) error { +func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemplateResource(in *infrav1.DockerMachineTemplateResource, out *DockerMachineTemplateResource, s apiconversion.Scope) error { // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. 
return autoConvert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemplateResource(in, out, s) } diff --git a/test/infrastructure/docker/api/v1alpha3/conversion_test.go b/test/infrastructure/docker/api/v1alpha3/conversion_test.go index 73220aeff14f..234cdc14c917 100644 --- a/test/infrastructure/docker/api/v1alpha3/conversion_test.go +++ b/test/infrastructure/docker/api/v1alpha3/conversion_test.go @@ -19,23 +19,23 @@ package v1alpha3 import ( "testing" - "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for DockerCluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.DockerCluster{}, + Hub: &infrav1.DockerCluster{}, Spoke: &DockerCluster{}, })) t.Run("for DockerMachine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.DockerMachine{}, + Hub: &infrav1.DockerMachine{}, Spoke: &DockerMachine{}, })) t.Run("for DockerMachineTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.DockerMachineTemplate{}, + Hub: &infrav1.DockerMachineTemplate{}, Spoke: &DockerMachineTemplate{}, })) } diff --git a/test/infrastructure/docker/api/v1alpha3/dockercluster_types.go b/test/infrastructure/docker/api/v1alpha3/dockercluster_types.go index d91fe09f3a16..4cc8ccdf92ce 100644 --- a/test/infrastructure/docker/api/v1alpha3/dockercluster_types.go +++ b/test/infrastructure/docker/api/v1alpha3/dockercluster_types.go @@ -19,7 +19,7 @@ package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" ) const ( @@ -42,7 +42,7 @@ type DockerClusterSpec struct { // Instead, the docker cluster controller will simply copy these into the Status and allow the Cluster API // controllers 
to do what they will with the defined failure domains. // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1alpha3.FailureDomains `json:"failureDomains,omitempty"` } // DockerClusterStatus defines the observed state of DockerCluster. @@ -52,11 +52,11 @@ type DockerClusterStatus struct { // FailureDomains don't mean much in CAPD since it's all local, but we can see how the rest of cluster API // will use this if we populate it. - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1alpha3.FailureDomains `json:"failureDomains,omitempty"` // Conditions defines current service state of the DockerCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"` } // APIEndpoint represents a reachable Kubernetes API endpoint. @@ -82,12 +82,12 @@ type DockerCluster struct { } // GetConditions returns the set of conditions for this object. -func (c *DockerCluster) GetConditions() clusterv1.Conditions { +func (c *DockerCluster) GetConditions() clusterv1alpha3.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (c *DockerCluster) SetConditions(conditions clusterv1.Conditions) { +func (c *DockerCluster) SetConditions(conditions clusterv1alpha3.Conditions) { c.Status.Conditions = conditions } diff --git a/test/infrastructure/docker/api/v1alpha3/dockermachine_types.go b/test/infrastructure/docker/api/v1alpha3/dockermachine_types.go index 978a0c168b83..a503d2aa40c2 100644 --- a/test/infrastructure/docker/api/v1alpha3/dockermachine_types.go +++ b/test/infrastructure/docker/api/v1alpha3/dockermachine_types.go @@ -19,7 +19,7 @@ package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" ) const ( @@ -84,11 +84,11 @@ type DockerMachineStatus struct { // Addresses contains the associated addresses for the docker machine. // +optional - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1alpha3.MachineAddress `json:"addresses,omitempty"` // Conditions defines current service state of the DockerMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"` } // +kubebuilder:resource:path=dockermachines,scope=Namespaced,categories=cluster-api @@ -105,12 +105,12 @@ type DockerMachine struct { } // GetConditions returns the set of conditions for this object. -func (c *DockerMachine) GetConditions() clusterv1.Conditions { +func (c *DockerMachine) GetConditions() clusterv1alpha3.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (c *DockerMachine) SetConditions(conditions clusterv1.Conditions) { +func (c *DockerMachine) SetConditions(conditions clusterv1alpha3.Conditions) { c.Status.Conditions = conditions } diff --git a/test/infrastructure/docker/api/v1alpha4/conversion.go b/test/infrastructure/docker/api/v1alpha4/conversion.go index e919628debe4..a1b42d8f4d53 100644 --- a/test/infrastructure/docker/api/v1alpha4/conversion.go +++ b/test/infrastructure/docker/api/v1alpha4/conversion.go @@ -20,43 +20,43 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *DockerCluster) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerCluster) + dst := dstRaw.(*infrav1.DockerCluster) return Convert_v1alpha4_DockerCluster_To_v1beta1_DockerCluster(src, dst, nil) } func (dst *DockerCluster) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerCluster) + src := srcRaw.(*infrav1.DockerCluster) return Convert_v1beta1_DockerCluster_To_v1alpha4_DockerCluster(src, dst, nil) } func (src *DockerClusterList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerClusterList) + dst := dstRaw.(*infrav1.DockerClusterList) return Convert_v1alpha4_DockerClusterList_To_v1beta1_DockerClusterList(src, dst, nil) } func (dst *DockerClusterList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerClusterList) + src := srcRaw.(*infrav1.DockerClusterList) return Convert_v1beta1_DockerClusterList_To_v1alpha4_DockerClusterList(src, dst, nil) } func (src *DockerClusterTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerClusterTemplate) + dst := dstRaw.(*infrav1.DockerClusterTemplate) if err := 
Convert_v1alpha4_DockerClusterTemplate_To_v1beta1_DockerClusterTemplate(src, dst, nil); err != nil { return err } // Manually restore data. - restored := &v1beta1.DockerClusterTemplate{} + restored := &infrav1.DockerClusterTemplate{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -67,7 +67,7 @@ func (src *DockerClusterTemplate) ConvertTo(dstRaw conversion.Hub) error { } func (dst *DockerClusterTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerClusterTemplate) + src := srcRaw.(*infrav1.DockerClusterTemplate) if err := Convert_v1beta1_DockerClusterTemplate_To_v1alpha4_DockerClusterTemplate(src, dst, nil); err != nil { return err @@ -82,50 +82,50 @@ func (dst *DockerClusterTemplate) ConvertFrom(srcRaw conversion.Hub) error { } func (src *DockerClusterTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerClusterTemplateList) + dst := dstRaw.(*infrav1.DockerClusterTemplateList) return Convert_v1alpha4_DockerClusterTemplateList_To_v1beta1_DockerClusterTemplateList(src, dst, nil) } func (dst *DockerClusterTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerClusterTemplateList) + src := srcRaw.(*infrav1.DockerClusterTemplateList) return Convert_v1beta1_DockerClusterTemplateList_To_v1alpha4_DockerClusterTemplateList(src, dst, nil) } func (src *DockerMachine) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachine) + dst := dstRaw.(*infrav1.DockerMachine) return Convert_v1alpha4_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil) } func (dst *DockerMachine) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachine) + src := srcRaw.(*infrav1.DockerMachine) return Convert_v1beta1_DockerMachine_To_v1alpha4_DockerMachine(src, dst, nil) } func (src *DockerMachineList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachineList) + dst := 
dstRaw.(*infrav1.DockerMachineList) return Convert_v1alpha4_DockerMachineList_To_v1beta1_DockerMachineList(src, dst, nil) } func (dst *DockerMachineList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachineList) + src := srcRaw.(*infrav1.DockerMachineList) return Convert_v1beta1_DockerMachineList_To_v1alpha4_DockerMachineList(src, dst, nil) } func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachineTemplate) + dst := dstRaw.(*infrav1.DockerMachineTemplate) if err := Convert_v1alpha4_DockerMachineTemplate_To_v1beta1_DockerMachineTemplate(src, dst, nil); err != nil { return err } // Manually restore data. - restored := &v1beta1.DockerMachineTemplate{} + restored := &infrav1.DockerMachineTemplate{} if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } @@ -136,7 +136,7 @@ func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } func (dst *DockerMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachineTemplate) + src := srcRaw.(*infrav1.DockerMachineTemplate) if err := Convert_v1beta1_DockerMachineTemplate_To_v1alpha4_DockerMachineTemplate(src, dst, nil); err != nil { return err @@ -151,23 +151,23 @@ func (dst *DockerMachineTemplate) ConvertFrom(srcRaw conversion.Hub) error { } func (src *DockerMachineTemplateList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta1.DockerMachineTemplateList) + dst := dstRaw.(*infrav1.DockerMachineTemplateList) return Convert_v1alpha4_DockerMachineTemplateList_To_v1beta1_DockerMachineTemplateList(src, dst, nil) } func (dst *DockerMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta1.DockerMachineTemplateList) + src := srcRaw.(*infrav1.DockerMachineTemplateList) return Convert_v1beta1_DockerMachineTemplateList_To_v1alpha4_DockerMachineTemplateList(src, dst, nil) } -func 
Convert_v1beta1_DockerClusterTemplateResource_To_v1alpha4_DockerClusterTemplateResource(in *v1beta1.DockerClusterTemplateResource, out *DockerClusterTemplateResource, s apiconversion.Scope) error { +func Convert_v1beta1_DockerClusterTemplateResource_To_v1alpha4_DockerClusterTemplateResource(in *infrav1.DockerClusterTemplateResource, out *DockerClusterTemplateResource, s apiconversion.Scope) error { // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. return autoConvert_v1beta1_DockerClusterTemplateResource_To_v1alpha4_DockerClusterTemplateResource(in, out, s) } -func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(in *v1beta1.DockerMachineTemplateResource, out *DockerMachineTemplateResource, s apiconversion.Scope) error { +func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(in *infrav1.DockerMachineTemplateResource, out *DockerMachineTemplateResource, s apiconversion.Scope) error { // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. 
return autoConvert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(in, out, s) } diff --git a/test/infrastructure/docker/api/v1alpha4/conversion_test.go b/test/infrastructure/docker/api/v1alpha4/conversion_test.go index 4132b38d8975..e48abaeedd51 100644 --- a/test/infrastructure/docker/api/v1alpha4/conversion_test.go +++ b/test/infrastructure/docker/api/v1alpha4/conversion_test.go @@ -19,28 +19,28 @@ package v1alpha4 import ( "testing" - "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { t.Run("for DockerCluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.DockerCluster{}, + Hub: &infrav1.DockerCluster{}, Spoke: &DockerCluster{}, })) t.Run("for DockerClusterTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.DockerClusterTemplate{}, + Hub: &infrav1.DockerClusterTemplate{}, Spoke: &DockerClusterTemplate{}, })) t.Run("for DockerMachine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.DockerMachine{}, + Hub: &infrav1.DockerMachine{}, Spoke: &DockerMachine{}, })) t.Run("for DockerMachineTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Hub: &v1beta1.DockerMachineTemplate{}, + Hub: &infrav1.DockerMachineTemplate{}, Spoke: &DockerMachineTemplate{}, })) } diff --git a/test/infrastructure/docker/api/v1alpha4/dockercluster_types.go b/test/infrastructure/docker/api/v1alpha4/dockercluster_types.go index 7cdea6150d40..766d66ffc99a 100644 --- a/test/infrastructure/docker/api/v1alpha4/dockercluster_types.go +++ b/test/infrastructure/docker/api/v1alpha4/dockercluster_types.go @@ -19,7 +19,7 @@ package v1alpha4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1alpha4 
"sigs.k8s.io/cluster-api/api/v1alpha4" ) const ( @@ -42,7 +42,7 @@ type DockerClusterSpec struct { // Instead, the docker cluster controller will simply copy these into the Status and allow the Cluster API // controllers to do what they will with the defined failure domains. // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1alpha4.FailureDomains `json:"failureDomains,omitempty"` // LoadBalancer allows defining configurations for the cluster load balancer. // +optional @@ -76,11 +76,11 @@ type DockerClusterStatus struct { // FailureDomains don't mean much in CAPD since it's all local, but we can see how the rest of cluster API // will use this if we populate it. - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1alpha4.FailureDomains `json:"failureDomains,omitempty"` // Conditions defines current service state of the DockerCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"` } // APIEndpoint represents a reachable Kubernetes API endpoint. @@ -107,12 +107,12 @@ type DockerCluster struct { } // GetConditions returns the set of conditions for this object. -func (c *DockerCluster) GetConditions() clusterv1.Conditions { +func (c *DockerCluster) GetConditions() clusterv1alpha4.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (c *DockerCluster) SetConditions(conditions clusterv1.Conditions) { +func (c *DockerCluster) SetConditions(conditions clusterv1alpha4.Conditions) { c.Status.Conditions = conditions } diff --git a/test/infrastructure/docker/api/v1alpha4/dockermachine_types.go b/test/infrastructure/docker/api/v1alpha4/dockermachine_types.go index 668598057c8e..f567c90b3e3b 100644 --- a/test/infrastructure/docker/api/v1alpha4/dockermachine_types.go +++ b/test/infrastructure/docker/api/v1alpha4/dockermachine_types.go @@ -19,7 +19,7 @@ package v1alpha4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" ) const ( @@ -84,11 +84,11 @@ type DockerMachineStatus struct { // Addresses contains the associated addresses for the docker machine. // +optional - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1alpha4.MachineAddress `json:"addresses,omitempty"` // Conditions defines current service state of the DockerMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"` } // +kubebuilder:resource:path=dockermachines,scope=Namespaced,categories=cluster-api @@ -106,12 +106,12 @@ type DockerMachine struct { } // GetConditions returns the set of conditions for this object. -func (c *DockerMachine) GetConditions() clusterv1.Conditions { +func (c *DockerMachine) GetConditions() clusterv1alpha4.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (c *DockerMachine) SetConditions(conditions clusterv1.Conditions) { +func (c *DockerMachine) SetConditions(conditions clusterv1alpha4.Conditions) { c.Status.Conditions = conditions } diff --git a/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go index a081f2b93b8a..47947b7338ec 100644 --- a/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go +++ b/test/infrastructure/docker/exp/api/v1alpha3/dockermachinepool_types.go @@ -19,8 +19,8 @@ package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" - infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" + clusterv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" + infrav1alpha3 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" ) const ( @@ -43,7 +43,7 @@ type DockerMachinePoolMachineTemplate struct { // ExtraMounts describes additional mount points for the node container // These may be used to bind a hostPath // +optional - ExtraMounts []infrav1.Mount `json:"extraMounts,omitempty"` + ExtraMounts []infrav1alpha3.Mount `json:"extraMounts,omitempty"` } // DockerMachinePoolSpec defines the desired state of DockerMachinePool. @@ -81,14 +81,14 @@ type DockerMachinePoolStatus struct { // Conditions defines current service state of the DockerMachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha3.Conditions `json:"conditions,omitempty"` } // DockerMachinePoolInstanceStatus contains status information about a DockerMachinePool. type DockerMachinePoolInstanceStatus struct { // Addresses contains the associated addresses for the docker machine. 
// +optional - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1alpha3.MachineAddress `json:"addresses,omitempty"` // InstanceName is the identification of the Machine Instance within the Machine Pool InstanceName string `json:"instanceName,omitempty"` @@ -125,12 +125,12 @@ type DockerMachinePool struct { } // GetConditions returns the set of conditions for this object. -func (c *DockerMachinePool) GetConditions() clusterv1.Conditions { +func (c *DockerMachinePool) GetConditions() clusterv1alpha3.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. -func (c *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) { +func (c *DockerMachinePool) SetConditions(conditions clusterv1alpha3.Conditions) { c.Status.Conditions = conditions } diff --git a/test/infrastructure/docker/exp/api/v1alpha4/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1alpha4/dockermachinepool_types.go index da32f8b67ef0..dc508f9deac1 100644 --- a/test/infrastructure/docker/exp/api/v1alpha4/dockermachinepool_types.go +++ b/test/infrastructure/docker/exp/api/v1alpha4/dockermachinepool_types.go @@ -19,8 +19,8 @@ package v1alpha4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4" + clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" + infrav1alpha4 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4" ) const ( @@ -43,7 +43,7 @@ type DockerMachinePoolMachineTemplate struct { // ExtraMounts describes additional mount points for the node container // These may be used to bind a hostPath // +optional - ExtraMounts []infrav1.Mount `json:"extraMounts,omitempty"` + ExtraMounts []infrav1alpha4.Mount `json:"extraMounts,omitempty"` } // DockerMachinePoolSpec defines the desired state of DockerMachinePool. 
@@ -81,14 +81,14 @@ type DockerMachinePoolStatus struct { // Conditions defines current service state of the DockerMachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1alpha4.Conditions `json:"conditions,omitempty"` } // DockerMachinePoolInstanceStatus contains status information about a DockerMachinePool. type DockerMachinePoolInstanceStatus struct { // Addresses contains the associated addresses for the docker machine. // +optional - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1alpha4.MachineAddress `json:"addresses,omitempty"` // InstanceName is the identification of the Machine Instance within the Machine Pool InstanceName string `json:"instanceName,omitempty"` @@ -126,12 +126,12 @@ type DockerMachinePool struct { } // GetConditions returns the set of conditions for this object. -func (c *DockerMachinePool) GetConditions() clusterv1.Conditions { +func (c *DockerMachinePool) GetConditions() clusterv1alpha4.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on this object. 
-func (c *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) { +func (c *DockerMachinePool) SetConditions(conditions clusterv1alpha4.Conditions) { c.Status.Conditions = conditions } diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index e55d95fff596..4bcca23a49ec 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -33,10 +33,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" utilexp "sigs.k8s.io/cluster-api/exp/util" "sigs.k8s.io/cluster-api/test/infrastructure/container" - infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/internal/docker" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" @@ -60,7 +60,7 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re ctx = container.RuntimeInto(ctx, r.ContainerRuntime) // Fetch the DockerMachinePool instance. 
- dockerMachinePool := &infrav1exp.DockerMachinePool{} + dockerMachinePool := &infraexpv1.DockerMachinePool{} if err := r.Client.Get(ctx, req.NamespacedName, dockerMachinePool); err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil @@ -111,8 +111,8 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re }() // Add finalizer first if not exist to avoid the race condition between init and delete - if !controllerutil.ContainsFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer) { - controllerutil.AddFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer) + if !controllerutil.ContainsFinalizer(dockerMachinePool, infraexpv1.MachinePoolFinalizer) { + controllerutil.AddFinalizer(dockerMachinePool, infraexpv1.MachinePoolFinalizer) return ctrl.Result{}, nil } @@ -127,19 +127,19 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re // SetupWithManager will add watches for this controller. func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { - clusterToDockerMachinePools, err := util.ClusterToObjectsMapper(mgr.GetClient(), &infrav1exp.DockerMachinePoolList{}, mgr.GetScheme()) + clusterToDockerMachinePools, err := util.ClusterToObjectsMapper(mgr.GetClient(), &infraexpv1.DockerMachinePoolList{}, mgr.GetScheme()) if err != nil { return err } c, err := ctrl.NewControllerManagedBy(mgr). - For(&infrav1exp.DockerMachinePool{}). + For(&infraexpv1.DockerMachinePool{}). WithOptions(options). WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))). Watches( - &source.Kind{Type: &clusterv1exp.MachinePool{}}, + &source.Kind{Type: &expv1.MachinePool{}}, handler.EnqueueRequestsFromMapFunc(utilexp.MachinePoolToInfrastructureMapFunc( - infrav1exp.GroupVersion.WithKind("DockerMachinePool"), ctrl.LoggerFrom(ctx))), + infraexpv1.GroupVersion.WithKind("DockerMachinePool"), ctrl.LoggerFrom(ctx))), ). 
Build(r) if err != nil { @@ -152,7 +152,7 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ) } -func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1exp.MachinePool, dockerMachinePool *infrav1exp.DockerMachinePool) (ctrl.Result, error) { +func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) (ctrl.Result, error) { pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool) if err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to build new node pool") @@ -162,11 +162,11 @@ func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, clust return ctrl.Result{}, errors.Wrap(err, "failed to delete all machines in the node pool") } - controllerutil.RemoveFinalizer(dockerMachinePool, infrav1exp.MachinePoolFinalizer) + controllerutil.RemoveFinalizer(dockerMachinePool, infraexpv1.MachinePoolFinalizer) return ctrl.Result{}, nil } -func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *clusterv1exp.MachinePool, dockerMachinePool *infrav1exp.DockerMachinePool) (ctrl.Result, error) { +func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) // Make sure bootstrap data is available and populated. 
@@ -214,7 +214,7 @@ func getDockerMachinePoolProviderID(clusterName, dockerMachinePoolName string) s return fmt.Sprintf("docker:////%s-dmp-%s", clusterName, dockerMachinePoolName) } -func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infrav1exp.DockerMachinePool) error { +func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infraexpv1.DockerMachinePool) error { // TODO: add conditions // Patch the object, ignoring conflicts on the conditions owned by this controller. diff --git a/test/infrastructure/docker/exp/internal/docker/nodepool.go b/test/infrastructure/docker/exp/internal/docker/nodepool.go index 624c7a50bf02..eedc5a6f037c 100644 --- a/test/infrastructure/docker/exp/internal/docker/nodepool.go +++ b/test/infrastructure/docker/exp/internal/docker/nodepool.go @@ -31,8 +31,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" - infrav1exp "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/container" @@ -48,14 +48,14 @@ const ( type NodePool struct { client client.Client cluster *clusterv1.Cluster - machinePool *clusterv1exp.MachinePool - dockerMachinePool *infrav1exp.DockerMachinePool + machinePool *expv1.MachinePool + dockerMachinePool *infraexpv1.DockerMachinePool labelFilters map[string]string machines []*docker.Machine } // NewNodePool creates a new node pool instances. 
-func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, mp *clusterv1exp.MachinePool, dmp *infrav1exp.DockerMachinePool) (*NodePool, error) { +func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, mp *expv1.MachinePool, dmp *infraexpv1.DockerMachinePool) (*NodePool, error) { np := &NodePool{ client: c, cluster: cluster, @@ -123,7 +123,7 @@ func (np *NodePool) ReconcileMachines(ctx context.Context) (ctrl.Result, error) // First remove instance status for machines no longer existing, then reconcile the existing machines. // NOTE: the status is the only source of truth for understanding if the machine is already bootstrapped, ready etc. // so we are preserving the existing status and using it as a bases for the next reconcile machine. - instances := make([]infrav1exp.DockerMachinePoolInstanceStatus, 0, len(np.machines)) + instances := make([]infraexpv1.DockerMachinePoolInstanceStatus, 0, len(np.machines)) for i := range np.dockerMachinePool.Status.Instances { instance := np.dockerMachinePool.Status.Instances[i] for j := range np.machines { @@ -217,7 +217,7 @@ func (np *NodePool) refresh(ctx context.Context) error { func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machine) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - var machineStatus infrav1exp.DockerMachinePoolInstanceStatus + var machineStatus infraexpv1.DockerMachinePoolInstanceStatus isFound := false for _, instanceStatus := range np.dockerMachinePool.Status.Instances { if instanceStatus.InstanceName == machine.Name() { @@ -227,7 +227,7 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin } if !isFound { log.Info("Creating instance record", "instance", machine.Name()) - machineStatus = infrav1exp.DockerMachinePoolInstanceStatus{ + machineStatus = infraexpv1.DockerMachinePoolInstanceStatus{ InstanceName: machine.Name(), Version: np.machinePool.Spec.Template.Spec.Version, } @@ -322,7 +322,7 @@ 
func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin } // getBootstrapData fetches the bootstrap data for the machine pool. -func getBootstrapData(ctx context.Context, c client.Client, machinePool *clusterv1exp.MachinePool) (string, bootstrapv1.Format, error) { +func getBootstrapData(ctx context.Context, c client.Client, machinePool *expv1.MachinePool) (string, bootstrapv1.Format, error) { if machinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil { return "", "", errors.New("error retrieving bootstrap data: linked MachinePool's bootstrap.dataSecretName is nil") } diff --git a/test/infrastructure/docker/internal/docker/kind_manager_test.go b/test/infrastructure/docker/internal/docker/kind_manager_test.go index 7f2cde45620d..0a616611e03e 100644 --- a/test/infrastructure/docker/internal/docker/kind_manager_test.go +++ b/test/infrastructure/docker/internal/docker/kind_manager_test.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" "sigs.k8s.io/kind/pkg/cluster/constants" - "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/container" ) @@ -49,7 +49,7 @@ func TestCreateNode(t *testing.T) { Role: constants.ControlPlaneNodeRoleValue, PortMappings: portMappingsWithAPIServer, Mounts: []v1alpha4.Mount{}, - IPFamily: v1beta1.IPv4IPFamily, + IPFamily: clusterv1.IPv4IPFamily, } _, err := createNode(ctx, createOpts) @@ -76,7 +76,7 @@ func TestCreateControlPlaneNode(t *testing.T) { containerRuntime.ResetRunContainerCallLogs() m := Manager{} - node, err := m.CreateControlPlaneNode(ctx, "TestName", "TestImage", "TestCluster", "100.100.100.100", 80, []v1alpha4.Mount{}, []v1alpha4.PortMapping{}, make(map[string]string), v1beta1.IPv4IPFamily) + node, err := m.CreateControlPlaneNode(ctx, "TestName", "TestImage", "TestCluster", "100.100.100.100", 80, []v1alpha4.Mount{}, []v1alpha4.PortMapping{}, make(map[string]string), clusterv1.IPv4IPFamily) 
g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(node.Role()).Should(Equal(constants.ControlPlaneNodeRoleValue)) @@ -99,7 +99,7 @@ func TestCreateWorkerNode(t *testing.T) { containerRuntime.ResetRunContainerCallLogs() m := Manager{} - node, err := m.CreateWorkerNode(ctx, "TestName", "TestImage", "TestCluster", []v1alpha4.Mount{}, []v1alpha4.PortMapping{}, make(map[string]string), v1beta1.IPv4IPFamily) + node, err := m.CreateWorkerNode(ctx, "TestName", "TestImage", "TestCluster", []v1alpha4.Mount{}, []v1alpha4.PortMapping{}, make(map[string]string), clusterv1.IPv4IPFamily) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(node.Role()).Should(Equal(constants.WorkerNodeRoleValue)) @@ -122,7 +122,7 @@ func TestCreateExternalLoadBalancerNode(t *testing.T) { containerRuntime.ResetRunContainerCallLogs() m := Manager{} - node, err := m.CreateExternalLoadBalancerNode(ctx, "TestName", "TestImage", "TestCluster", "100.100.100.100", 0, v1beta1.IPv4IPFamily) + node, err := m.CreateExternalLoadBalancerNode(ctx, "TestName", "TestImage", "TestCluster", "100.100.100.100", 0, clusterv1.IPv4IPFamily) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(node.Role()).Should(Equal(constants.ExternalLoadBalancerNodeRoleValue))