ipam: Remove cluster-pool-v2beta operator implementation
This mostly reverts commit 3d6b9a8.

The changes in podcidr.go have been restored to the behavior before the
cluster-pool-v2beta code was added.

Signed-off-by: Sebastian Wicki <[email protected]>
gandro committed Sep 7, 2023
1 parent ae25fca commit 22bb525
Showing 8 changed files with 46 additions and 823 deletions.
install/kubernetes/cilium/templates/cilium-configmap.yaml (2 changes: 1 addition & 1 deletion)

@@ -912,7 +912,7 @@ data:
ipam-cilium-node-update-rate: {{ include "validateDuration" .Values.ipam.ciliumNodeUpdateRate | quote }}
{{- end }}

- {{- if or (eq $ipam "cluster-pool") (eq $ipam "cluster-pool-v2beta") }}
+ {{- if (eq $ipam "cluster-pool") }}
{{- if .Values.ipv4.enabled }}
{{- if hasKey .Values.ipam.operator "clusterPoolIPv4PodCIDR" }}
{{- /* ipam.operator.clusterPoolIPv4PodCIDR removed in v1.14, remove this failsafe around v1.17 */ -}}

operator/cmd/flags.go (2 changes: 1 addition & 1 deletion)

@@ -149,7 +149,7 @@ func InitGlobalFlags(cmd *cobra.Command, vp *viper.Viper) {
        return "cilium-operator-azure"
    case ipamOption.IPAMAlibabaCloud:
        return "cilium-operator-alibabacloud"
-   case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool, ipamOption.IPAMClusterPoolV2, ipamOption.IPAMCRD:
+   case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool, ipamOption.IPAMCRD:
        return "cilium-operator-generic"
    default:
        return ""

operator/cmd/provider_operator_register.go (1 change: 0 additions & 1 deletion)

@@ -15,6 +15,5 @@ import (

func init() {
    allocatorProviders[ipamOption.IPAMClusterPool] = &clusterpool.AllocatorOperator{}
-   allocatorProviders[ipamOption.IPAMClusterPoolV2] = &clusterpool.AllocatorOperator{}
    allocatorProviders[ipamOption.IPAMMultiPool] = &multipool.Allocator{}
}

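The registration above follows a simple pattern: an init function fills the package-level allocatorProviders map, keyed by IPAM mode name, and operator startup later looks up the configured mode in that map (see the operator/cmd/root.go hunk below). The following is a minimal, self-contained sketch of that pattern; the AllocatorProvider interface and the concrete type here are simplified stand-ins, not the real clusterpool or multipool implementations.

package main

import "fmt"

// AllocatorProvider is a simplified stand-in for the operator's allocator
// provider interface; the real interface has lifecycle methods rather than
// a Name method.
type AllocatorProvider interface {
    Name() string
}

type clusterPoolAllocator struct{}

func (clusterPoolAllocator) Name() string { return "cluster-pool allocator" }

// allocatorProviders mirrors the map populated by the init function above:
// IPAM mode name -> provider implementation.
var allocatorProviders = map[string]AllocatorProvider{
    "cluster-pool": clusterPoolAllocator{},
}

func main() {
    // Startup resolves the configured IPAM mode; a mode with no registered
    // provider (such as the removed "cluster-pool-v2beta") is simply absent.
    for _, mode := range []string{"cluster-pool", "cluster-pool-v2beta"} {
        if alloc, ok := allocatorProviders[mode]; ok {
            fmt.Printf("mode %q -> %s\n", mode, alloc.Name())
        } else {
            fmt.Printf("mode %q has no registered provider\n", mode)
        }
    }
}
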
operator/cmd/root.go (3 changes: 1 addition & 2 deletions)

@@ -473,7 +473,6 @@ func (legacy *legacyOnLeader) onStart(_ hive.HookContext) error {
    case ipamOption.IPAMAzure,
        ipamOption.IPAMENI,
        ipamOption.IPAMClusterPool,
-       ipamOption.IPAMClusterPoolV2,
        ipamOption.IPAMMultiPool,
        ipamOption.IPAMAlibabaCloud:
        alloc, providerBuiltin := allocatorProviders[ipamMode]

@@ -650,7 +649,7 @@ func (legacy *legacyOnLeader) onStart(_ hive.HookContext) error {
        }
    }

-   if option.Config.IPAM == ipamOption.IPAMClusterPool || option.Config.IPAM == ipamOption.IPAMClusterPoolV2 || option.Config.IPAM == ipamOption.IPAMMultiPool {
+   if option.Config.IPAM == ipamOption.IPAMClusterPool || option.Config.IPAM == ipamOption.IPAMMultiPool {
        // We will use CiliumNodes as the source of truth for the podCIDRs.
        // Once the CiliumNodes are synchronized with the operator we will
        // be able to watch for K8s Node events which they will be used

pkg/ipam/allocator/podcidr/podcidr.go (48 changes: 13 additions & 35 deletions)

@@ -19,12 +19,10 @@ import (
    "github.com/cilium/cilium/pkg/controller"
    ipPkg "github.com/cilium/cilium/pkg/ip"
    "github.com/cilium/cilium/pkg/ipam"
-   ipamOption "github.com/cilium/cilium/pkg/ipam/option"
    v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
    "github.com/cilium/cilium/pkg/lock"
    "github.com/cilium/cilium/pkg/logging"
    "github.com/cilium/cilium/pkg/logging/logfields"
-   "github.com/cilium/cilium/pkg/option"
    "github.com/cilium/cilium/pkg/revert"
    "github.com/cilium/cilium/pkg/trigger"
)

@@ -343,36 +341,17 @@ func (n *NodesPodCIDRManager) Upsert(node *v2.CiliumNode) {

// Needs n.Mutex to be held.
func (n *NodesPodCIDRManager) upsertLocked(node *v2.CiliumNode) {
-   var (
-       updateStatus, updateSpec bool
-       cn *v2.CiliumNode
-       err error
-   )
-   if option.Config.IPAMMode() == ipamOption.IPAMClusterPoolV2 {
-       cn, updateSpec, updateStatus, err = n.allocateNodeV2(node)
-       if err != nil {
-           return
-       }
-   } else {
-       // FIXME: This code block falls back to the old behavior of clusterpool,
-       // where we only assign one pod CIDR for IPv4 and IPv6. Once v2 becomes
-       // fully backwards compatible with v1, we can remove this else block.
-       var allocated bool
-       cn, allocated, updateStatus, err = n.allocateNode(node)
-       if err != nil {
-           return
-       }
-       // if allocated is false it means that we were unable to allocate
-       // a CIDR so we need to update the status of the node into k8s.
-       updateStatus = !allocated && updateStatus
-       // ClusterPool v1 never updates both the spec and the status
-       updateSpec = !updateStatus
-   }
+   cn, allocated, updateStatus, err := n.allocateNode(node)
+   if err != nil {
+       return
+   }
    if cn == nil {
        // no-op
        return
    }
-   if updateStatus {
+   // if allocated is false it means that we were unable to allocate
+   // a CIDR so we need to update the status of the node into k8s.
+   if !allocated && updateStatus {
        // the n.syncNode will never fail because it's only adding elements to a
        // map.
        // NodesPodCIDRManager will later on sync the node into k8s by the

@@ -386,15 +365,14 @@ func (n *NodesPodCIDRManager) upsertLocked(node *v2.CiliumNode) {
        } else {
            n.syncNode(k8sOpCreate, cn)
        }
        return
    }
-   if updateSpec {
-       // If the resource version is != "" it means the object already exists
-       // in kubernetes so we should perform an update instead of a create.
-       if cn.GetResourceVersion() != "" {
-           n.syncNode(k8sOpUpdate, cn)
-       } else {
-           n.syncNode(k8sOpCreate, cn)
-       }
-   }
+   // If the resource version is != "" it means the object already exists
+   // in kubernetes so we should perform an update instead of a create.
+   if cn.GetResourceVersion() != "" {
+       n.syncNode(k8sOpUpdate, cn)
+   } else {
+       n.syncNode(k8sOpCreate, cn)
+   }
}

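Reading only the added lines in the two hunks above, the restored upsertLocked reduces to: allocate, return early on error or when there is nothing to sync, sync the node status when allocation failed but the status changed, and otherwise sync the spec, choosing between create and update based on the resource version. The sketch below restates that control flow with stand-in types so it runs on its own; the operation name used in the status branch is an assumption, since those exact lines are collapsed between the hunks above.

package main

import "fmt"

// ciliumNode is a minimal stand-in for *v2.CiliumNode; only the resource
// version matters for the branching shown in the diff.
type ciliumNode struct {
    resourceVersion string
}

// syncOp stands in for the k8sOpCreate / k8sOpUpdate style constants passed
// to NodesPodCIDRManager.syncNode in the real code.
type syncOp string

const (
    opCreate       syncOp = "create"
    opUpdate       syncOp = "update"
    opUpdateStatus syncOp = "update-status" // assumed name for the status branch
)

// decideSyncOp mirrors the restored upsertLocked branching:
//   - nil node: nothing to sync (no-op),
//   - allocation failed but status changed: sync the status,
//   - otherwise: sync the spec,
// in both sync cases creating the object when it has no resource version yet.
func decideSyncOp(cn *ciliumNode, allocated, updateStatus bool) (syncOp, bool) {
    if cn == nil {
        return "", false
    }
    if !allocated && updateStatus {
        if cn.resourceVersion != "" {
            return opUpdateStatus, true
        }
        return opCreate, true
    }
    if cn.resourceVersion != "" {
        return opUpdate, true
    }
    return opCreate, true
}

func main() {
    existing := &ciliumNode{resourceVersion: "42"}
    fresh := &ciliumNode{}

    fmt.Println(decideSyncOp(existing, true, false)) // update true
    fmt.Println(decideSyncOp(fresh, true, false))    // create true
    fmt.Println(decideSyncOp(existing, false, true)) // update-status true
    fmt.Println(decideSyncOp(nil, false, false))     // "" false
}
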
pkg/ipam/allocator/podcidr/podcidr_test.go (81 changes: 30 additions & 51 deletions)

@@ -21,10 +21,8 @@ import (

    "github.com/cilium/cilium/pkg/checker"
    "github.com/cilium/cilium/pkg/controller"
-   ipamOption "github.com/cilium/cilium/pkg/ipam/option"
    ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
    v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
-   "github.com/cilium/cilium/pkg/option"
    "github.com/cilium/cilium/pkg/trigger"
)

@@ -62,19 +60,6 @@ func mustNewTrigger(f func(), minInterval time.Duration) *trigger.Trigger {
    return t
}

-   var defaultIPAMModes = []string{ipamOption.IPAMClusterPool, ipamOption.IPAMClusterPoolV2}
-
-   func runWithIPAMModes(ipamModes []string, testFunc func(mode string)) {
-       oldIPAMMode := option.Config.IPAM
-       defer func() {
-           option.Config.IPAM = oldIPAMMode
-       }()
-       for _, ipamMode := range ipamModes {
-           option.Config.IPAM = ipamMode
-           testFunc(ipamMode)
-       }
-   }

type mockCIDRAllocator struct {
    OnOccupy       func(cidr *net.IPNet) error
    OnAllocateNext func() (*net.IPNet, error)

@@ -265,26 +250,23 @@ func (s *PodCIDRSuite) TestNodesPodCIDRManager_Delete(c *C) {
        },
    }

-   runWithIPAMModes(defaultIPAMModes, func(ipamMode string) {
-       for _, tt := range tests {
-           c.Logf("Running %q (ipam: %s)", tt.name, ipamMode)
-           tt.fields = tt.testSetup()
-           n := &NodesPodCIDRManager{
-               k8sReSyncController: tt.fields.k8sReSyncController,
-               k8sReSync:           tt.fields.k8sReSync,
-               canAllocatePodCIDRs: tt.fields.canAllocateNodes,
-               v4CIDRAllocators:    tt.fields.v4ClusterCIDRs,
-               v6CIDRAllocators:    tt.fields.v6ClusterCIDRs,
-               nodes:               tt.fields.nodes,
-               ciliumNodesToK8s:    tt.fields.ciliumNodesToK8s,
-           }
-           n.Delete(tt.args.node)
-
-           if tt.testPostRun != nil {
-               tt.testPostRun(tt.fields)
-           }
-       }
-   })
+   for _, tt := range tests {
+       tt.fields = tt.testSetup()
+       n := &NodesPodCIDRManager{
+           k8sReSyncController: tt.fields.k8sReSyncController,
+           k8sReSync:           tt.fields.k8sReSync,
+           canAllocatePodCIDRs: tt.fields.canAllocateNodes,
+           v4CIDRAllocators:    tt.fields.v4ClusterCIDRs,
+           v6CIDRAllocators:    tt.fields.v6ClusterCIDRs,
+           nodes:               tt.fields.nodes,
+           ciliumNodesToK8s:    tt.fields.ciliumNodesToK8s,
+       }
+       n.Delete(tt.args.node)
+
+       if tt.testPostRun != nil {
+           tt.testPostRun(tt.fields)
+       }
+   }
}

func (s *PodCIDRSuite) TestNodesPodCIDRManager_Resync(c *C) {

@@ -549,26 +531,23 @@ func (s *PodCIDRSuite) TestNodesPodCIDRManager_Upsert(c *C) {
        },
    }

-   runWithIPAMModes(defaultIPAMModes, func(ipamMode string) {
-       for _, tt := range tests {
-           c.Logf("Running %q (ipam: %s)", tt.name, ipamMode)
-           tt.fields = tt.testSetup()
-           n := &NodesPodCIDRManager{
-               k8sReSyncController: tt.fields.k8sReSyncController,
-               k8sReSync:           tt.fields.k8sReSync,
-               canAllocatePodCIDRs: tt.fields.canAllocateNodes,
-               v4CIDRAllocators:    tt.fields.v4ClusterCIDRs,
-               v6CIDRAllocators:    tt.fields.v6ClusterCIDRs,
-               nodes:               tt.fields.nodes,
-               ciliumNodesToK8s:    tt.fields.ciliumNodesToK8s,
-           }
-           n.Upsert(tt.args.node)
-
-           if tt.testPostRun != nil {
-               tt.testPostRun(tt.fields)
-           }
-       }
-   })
+   for _, tt := range tests {
+       tt.fields = tt.testSetup()
+       n := &NodesPodCIDRManager{
+           k8sReSyncController: tt.fields.k8sReSyncController,
+           k8sReSync:           tt.fields.k8sReSync,
+           canAllocatePodCIDRs: tt.fields.canAllocateNodes,
+           v4CIDRAllocators:    tt.fields.v4ClusterCIDRs,
+           v6CIDRAllocators:    tt.fields.v6ClusterCIDRs,
+           nodes:               tt.fields.nodes,
+           ciliumNodesToK8s:    tt.fields.ciliumNodesToK8s,
+       }
+       n.Upsert(tt.args.node)
+
+       if tt.testPostRun != nil {
+           tt.testPostRun(tt.fields)
+       }
+   }
}

func (s *PodCIDRSuite) TestNodesPodCIDRManager_allocateIPNets(c *C) {
(The remaining 2 of the 8 changed files are not shown above.)
