Skip to content

Commit aafb9d4

Browse files
authored
Merge pull request kubernetes#133595 from tkashem/fix-dra-test-flake
fix flake in dra test 'TestPlugin'
2 parents b7706ef + 747a295 commit aafb9d4

File tree

1 file changed

+21
-1
lines changed

1 file changed

+21
-1
lines changed

pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go

Lines changed: 21 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -41,6 +41,7 @@ import (
4141
"k8s.io/client-go/informers"
4242
"k8s.io/client-go/kubernetes/fake"
4343
cgotesting "k8s.io/client-go/testing"
44+
"k8s.io/client-go/tools/cache"
4445
"k8s.io/client-go/tools/events"
4546
resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
4647
kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1"
@@ -2062,7 +2063,21 @@ func setup(t *testing.T, args *config.DynamicResourcesArgs, nodes []*v1.Node, cl
20622063
}
20632064
resourceSliceTracker, err := resourceslicetracker.StartTracker(tCtx, resourceSliceTrackerOpts)
20642065
require.NoError(t, err, "couldn't start resource slice tracker")
2065-
tc.draManager = NewDRAManager(tCtx, assumecache.NewAssumeCache(tCtx.Logger(), tc.informerFactory.Resource().V1().ResourceClaims().Informer(), "resource claim", "", nil), resourceSliceTracker, tc.informerFactory)
2066+
2067+
claimsCache := assumecache.NewAssumeCache(tCtx.Logger(), tc.informerFactory.Resource().V1().ResourceClaims().Informer(), "resource claim", "", nil)
2068+
// NewAssumeCache calls the informer's AddEventHandler method to register
2069+
// a handler in order to stay in sync with the informer's store, but
2070+
// NewAssumeCache does not return the ResourceEventHandlerRegistration.
2071+
// We call AddEventHandler of the assume cache, passing it a noop
2072+
// ResourceEventHandler in order to get access to the
2073+
// ResourceEventHandlerRegistration returned by the informer.
2074+
//
2075+
// This is not the registered handler that is used by the DRA
2076+
// manager, but it is close enough because the assume cache
2077+
// uses a single boolean for "is synced" for all handlers.
2078+
registeredHandler := claimsCache.AddEventHandler(cache.ResourceEventHandlerFuncs{})
2079+
2080+
tc.draManager = NewDRAManager(tCtx, claimsCache, resourceSliceTracker, tc.informerFactory)
20662081
opts := []runtime.Option{
20672082
runtime.WithClientSet(tc.client),
20682083
runtime.WithInformerFactory(tc.informerFactory),
@@ -2103,6 +2118,11 @@ func setup(t *testing.T, args *config.DynamicResourcesArgs, nodes []*v1.Node, cl
21032118
})
21042119

21052120
tc.informerFactory.WaitForCacheSync(tc.ctx.Done())
2121+
// The above does not tell us if the registered handler (from NewAssumeCache)
2122+
// is synced, we need to wait until HasSynced of the handler returns
2123+
// true, this ensures that the assume cache is in sync with the informer's
2124+
// store which has been informed by at least one full LIST of the underlying storage.
2125+
cache.WaitForCacheSync(tc.ctx.Done(), registeredHandler.HasSynced)
21062126

21072127
for _, node := range nodes {
21082128
nodeInfo := framework.NewNodeInfo()

0 commit comments

Comments (0)