Skip to content
This repository was archived by the owner on Dec 17, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 0 additions & 44 deletions pkg/cache/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ import (
"context"
"fmt"
"runtime/debug"
"sort"
"strings"
"sync"
"time"
Expand Down Expand Up @@ -130,9 +129,6 @@ type ClusterCache interface {
Invalidate(opts ...UpdateSettingsFunc)
// FindResources returns resources that matches given list of predicates from specified namespace or everywhere if specified namespace is empty
FindResources(namespace string, predicates ...func(r *Resource) bool) map[kube.ResourceKey]*Resource
// IterateHierarchy iterates resource tree starting from the specified top level resource and executes callback for each resource in the tree.
// The action callback returns true if iteration should continue and false otherwise.
IterateHierarchy(key kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool)
// IterateHierarchyV2 iterates resource tree starting from the specified top level resources and executes callback for each resource in the tree.
// The action callback returns true if iteration should continue and false otherwise.
IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool)
Expand Down Expand Up @@ -1055,46 +1051,6 @@ func (c *clusterCache) FindResources(namespace string, predicates ...func(r *Res
return result
}

// IterateHierarchy iterates the resource tree starting from the specified top level
// resource and executes the action callback for each resource in the tree. Traversal
// down a branch stops when the callback returns false. The cluster cache read lock is
// held for the whole iteration, so callbacks must not try to re-acquire it.
func (c *clusterCache) IterateHierarchy(key kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if res, ok := c.resources[key]; ok {
		nsNodes := c.nsIndex[key.Namespace]
		if !action(res, nsNodes) {
			return
		}
		// Group the direct children by UID so that the same object reported under
		// several API groups can be collapsed to one representative below.
		childrenByUID := make(map[types.UID][]*Resource)
		for _, child := range nsNodes {
			if res.isParentOf(child) {
				childrenByUID[child.Ref.UID] = append(childrenByUID[child.Ref.UID], child)
			}
		}
		// Make sure children have no duplicates.
		for _, children := range childrenByUID {
			if len(children) > 0 {
				// The object might have multiple children with the same UID (e.g. replicaset
				// from apps and extensions group). It is ok to pick any object but we need to
				// make sure we pick the same child after every refresh, so sort by the resource
				// key string. The built-in comparison operator is clearer and faster than
				// strings.Compare (see the strings package documentation).
				sort.Slice(children, func(i, j int) bool {
					return children[i].ResourceKey().String() < children[j].ResourceKey().String()
				})
				child := children[0]
				if action(child, nsNodes) {
					child.iterateChildren(nsNodes, map[kube.ResourceKey]bool{res.ResourceKey(): true}, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
						if err != nil {
							// Circular-dependency (or similar) errors are logged at debug
							// level and stop traversal down this branch only.
							c.log.V(2).Info(err.Error())
							return false
						}
						return action(child, namespaceResources)
					})
				}
			}
		}
	}
}

// IterateHierarchyV2 iterates the resource tree starting from the specified top level resources and executes the callback for each resource in the tree
func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
c.lock.RLock()
Expand Down
103 changes: 1 addition & 102 deletions pkg/cache/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ func (c *clusterCache) WithAPIResources(newApiResources []kube.APIResourceInfo)

func getChildren(cluster *clusterCache, un *unstructured.Unstructured) []*Resource {
hierarchy := make([]*Resource, 0)
cluster.IterateHierarchy(kube.GetResourceKey(un), func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
cluster.IterateHierarchyV2([]kube.ResourceKey{kube.GetResourceKey(un)}, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
hierarchy = append(hierarchy, child)
return true
})
Expand Down Expand Up @@ -1045,92 +1045,6 @@ func testDeploy() *appsv1.Deployment {
}
}

// TestIterateHierachy exercises IterateHierarchy: a full traversal plus early exit
// at each level of the deployment -> replicaset -> pod hierarchy.
func TestIterateHierachy(t *testing.T) {
	cluster := newCluster(t, testPod1(), testPod2(), testRS(), testExtensionsRS(), testDeploy())
	require.NoError(t, cluster.EnsureSynced())

	// collect walks the hierarchy rooted at root, recording every visited key and
	// delegating the continue/stop decision to the supplied predicate.
	collect := func(root kube.ResourceKey, keepGoing func(child *Resource) bool) []kube.ResourceKey {
		visited := []kube.ResourceKey{}
		cluster.IterateHierarchy(root, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
			visited = append(visited, child.ResourceKey())
			return keepGoing(child)
		})
		return visited
	}

	t.Run("IterateAll", func(t *testing.T) {
		visited := collect(kube.GetResourceKey(mustToUnstructured(testDeploy())), func(*Resource) bool { return true })

		assert.ElementsMatch(t,
			[]kube.ResourceKey{
				kube.GetResourceKey(mustToUnstructured(testPod1())),
				kube.GetResourceKey(mustToUnstructured(testPod2())),
				kube.GetResourceKey(mustToUnstructured(testRS())),
				kube.GetResourceKey(mustToUnstructured(testDeploy())),
			},
			visited)
	})

	t.Run("ExitAtRoot", func(t *testing.T) {
		visited := collect(kube.GetResourceKey(mustToUnstructured(testDeploy())), func(*Resource) bool { return false })

		assert.ElementsMatch(t,
			[]kube.ResourceKey{
				kube.GetResourceKey(mustToUnstructured(testDeploy())),
			},
			visited)
	})

	t.Run("ExitAtSecondLevelChild", func(t *testing.T) {
		visited := collect(kube.GetResourceKey(mustToUnstructured(testDeploy())), func(child *Resource) bool {
			return child.ResourceKey().Kind != kube.ReplicaSetKind
		})

		assert.ElementsMatch(t,
			[]kube.ResourceKey{
				kube.GetResourceKey(mustToUnstructured(testDeploy())),
				kube.GetResourceKey(mustToUnstructured(testRS())),
			},
			visited)
	})

	t.Run("ExitAtThirdLevelChild", func(t *testing.T) {
		visited := collect(kube.GetResourceKey(mustToUnstructured(testDeploy())), func(child *Resource) bool {
			return child.ResourceKey().Kind != kube.PodKind
		})

		assert.ElementsMatch(t,
			[]kube.ResourceKey{
				kube.GetResourceKey(mustToUnstructured(testDeploy())),
				kube.GetResourceKey(mustToUnstructured(testRS())),
				kube.GetResourceKey(mustToUnstructured(testPod1())),
				kube.GetResourceKey(mustToUnstructured(testPod2())),
			},
			visited)
	})

	// After uid is backfilled for owner of pod2, it should appear in results here as well.
	t.Run("IterateStartFromExtensionsRS", func(t *testing.T) {
		visited := collect(kube.GetResourceKey(mustToUnstructured(testExtensionsRS())), func(*Resource) bool { return true })

		assert.ElementsMatch(t,
			[]kube.ResourceKey{
				kube.GetResourceKey(mustToUnstructured(testPod1())),
				kube.GetResourceKey(mustToUnstructured(testPod2())),
				kube.GetResourceKey(mustToUnstructured(testExtensionsRS())),
			},
			visited)
	})
}

func TestIterateHierachyV2(t *testing.T) {
cluster := newCluster(t, testPod1(), testPod2(), testRS(), testExtensionsRS(), testDeploy())
err := cluster.EnsureSynced()
Expand Down Expand Up @@ -1378,18 +1292,3 @@ func BenchmarkIterateHierarchyV2(b *testing.B) {
})
}
}

// func BenchmarkIterateHierarchy(b *testing.B) {
// cluster := newCluster(b)
// for _, resource := range testResources {
// cluster.setNode(resource)
// }
// b.ResetTimer()
// for n := 0; n < b.N; n++ {
// cluster.IterateHierarchy(kube.ResourceKey{
// Namespace: "default", Name: "test-1", Kind: "Pod",
// }, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
// return true
// })
// }
// }
5 changes: 0 additions & 5 deletions pkg/cache/mocks/ClusterCache.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion pkg/cache/predicates_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ func ExampleNewClusterCache_inspectNamespaceResources() {
}
// Iterate default namespace resources tree
for _, root := range clusterCache.FindResources("default", TopLevelResource) {
clusterCache.IterateHierarchy(root.ResourceKey(), func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
clusterCache.IterateHierarchyV2([]kube.ResourceKey{root.ResourceKey()}, func(resource *Resource, _ map[kube.ResourceKey]*Resource) bool {
fmt.Printf("resource: %s, info: %v\n", resource.Ref.String(), resource.Info)
return true
})
Expand Down
24 changes: 0 additions & 24 deletions pkg/cache/resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,30 +75,6 @@ func (r *Resource) toOwnerRef() metav1.OwnerReference {
return metav1.OwnerReference{UID: r.Ref.UID, Name: r.Ref.Name, Kind: r.Ref.Kind, APIVersion: r.Ref.APIVersion}
}

// newResourceKeySet returns a new set containing every entry of the given set plus
// the additional keys. The input set is left unmodified, which lets callers build
// per-branch ancestor sets during recursive traversal without aliasing.
func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey) map[kube.ResourceKey]bool {
	// Pre-size the destination to the maximum possible size to avoid rehashing
	// while copying.
	newSet := make(map[kube.ResourceKey]bool, len(set)+len(keys))
	for k, v := range set {
		newSet[k] = v
	}
	for _, k := range keys {
		newSet[k] = true
	}
	return newSet
}

// iterateChildren invokes action for every immediate child of r in ns and recurses
// depth-first into each child's subtree. The parents set carries the keys of the
// resources already on the current ancestry path and is used to detect cycles: when
// a child is also an ancestor, action is invoked once with a descriptive error (its
// return value intentionally ignored) and that branch is not recursed into.
// Recursion into a child stops when action returns false.
func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents map[kube.ResourceKey]bool, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
	for childKey, child := range ns {
		// Use the range value directly instead of a redundant second map lookup.
		if !r.isParentOf(child) {
			continue
		}
		if parents[childKey] {
			key := r.ResourceKey()
			_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
		} else if action(nil, child, ns) {
			// Extend the ancestor set with r (copy-on-write) so sibling branches
			// are not affected by this branch's path.
			child.iterateChildren(ns, newResourceKeySet(parents, r.ResourceKey()), action)
		}
	}
}

// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource.
func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
key := r.ResourceKey()
Expand Down