diff --git a/lib/kube/proxy/moderated_sessions_test.go b/lib/kube/proxy/moderated_sessions_test.go
index cc1dc9e3c85da..007cf280af656 100644
--- a/lib/kube/proxy/moderated_sessions_test.go
+++ b/lib/kube/proxy/moderated_sessions_test.go
@@ -43,6 +43,7 @@ import (
 	"github.com/gravitational/teleport/api/types"
 	apievents "github.com/gravitational/teleport/api/types/events"
 	"github.com/gravitational/teleport/entitlements"
+	"github.com/gravitational/teleport/lib/auth/authclient"
 	"github.com/gravitational/teleport/lib/events"
 	testingkubemock "github.com/gravitational/teleport/lib/kube/proxy/testing/kube_server"
 	"github.com/gravitational/teleport/lib/modules"
@@ -527,12 +528,20 @@ func TestInteractiveSessionsNoAuth(t *testing.T) {
 	require.NoError(t, err)
 	t.Cleanup(func() { kubeMock.Close() })
 
+	authClient := &fakeClient{
+		// ClientI is left nil intentionally because we set it via WrapAuthClient.
+		closeC: make(chan struct{}),
+	}
 	// creates a Kubernetes service with a configured cluster pointing to mock api server
 	testCtx := SetupTestContext(
 		context.Background(),
 		t,
 		TestConfig{
 			Clusters: []KubeClusterConfig{{Name: kubeCluster, APIEndpoint: kubeMock.URL}},
+			WrapAuthClient: func(client authclient.ClientI) authclient.ClientI {
+				authClient.ClientI = client
+				return authClient
+			},
 		},
 	)
 	// close tests
@@ -620,7 +629,7 @@ func TestInteractiveSessionsNoAuth(t *testing.T) {
 	// Mark the lock as stale so that the session with strict locking is denied.
 	close(testCtx.lockWatcher.StaleC)
 	// force the auth client to return an error when trying to create a session.
-	close(testCtx.closeSessionTrackers)
+	close(authClient.closeC)
 	require.Eventually(t, func() bool {
 		return testCtx.lockWatcher.IsStale()
diff --git a/lib/kube/proxy/utils_test.go b/lib/kube/proxy/utils_test.go
index 183a2cf4907e8..ea0a8ec227fc9 100644
--- a/lib/kube/proxy/utils_test.go
+++ b/lib/kube/proxy/utils_test.go
@@ -72,23 +72,22 @@ import (
 )
 
 type TestContext struct {
-	HostID               string
-	ClusterName          string
-	TLSServer            *authtest.TLSServer
-	AuthServer           *auth.Server
-	AuthClient           *authclient.Client
-	Authz                authz.Authorizer
-	KubeServer           *TLSServer
-	KubeProxy            *TLSServer
-	Emitter              *eventstest.ChannelEmitter
-	Context              context.Context
-	kubeServerListener   net.Listener
-	kubeProxyListener    net.Listener
-	cancel               context.CancelFunc
-	heartbeatCtx         context.Context
-	heartbeatCancel      context.CancelFunc
-	lockWatcher          *services.LockWatcher
-	closeSessionTrackers chan struct{}
+	HostID             string
+	ClusterName        string
+	TLSServer          *authtest.TLSServer
+	AuthServer         *auth.Server
+	AuthClient         *authclient.Client
+	Authz              authz.Authorizer
+	KubeServer         *TLSServer
+	KubeProxy          *TLSServer
+	Emitter            *eventstest.ChannelEmitter
+	Context            context.Context
+	kubeServerListener net.Listener
+	kubeProxyListener  net.Listener
+	cancel             context.CancelFunc
+	heartbeatCtx       context.Context
+	heartbeatCancel    context.CancelFunc
+	lockWatcher        *services.LockWatcher
 }
 
 // KubeClusterConfig defines the cluster to be created
@@ -107,6 +106,7 @@ type TestConfig struct {
 	OnEvent              func(apievents.AuditEvent)
 	ClusterFeatures      func() proto.Features
 	CreateAuditStreamErr error
+	WrapAuthClient       func(authclient.ClientI) authclient.ClientI
 }
 
 // SetupTestContext creates a kube service with clusters configured.
@@ -114,13 +114,12 @@ func SetupTestContext(ctx context.Context, t *testing.T, cfg TestConfig) *TestCo
 	ctx, cancel := context.WithCancel(ctx)
 	heartbeatCtx, heartbeatCancel := context.WithCancel(ctx)
 	testCtx := &TestContext{
-		ClusterName:          "root.example.com",
-		HostID:               uuid.New().String(),
-		Context:              ctx,
-		cancel:               cancel,
-		heartbeatCtx:         heartbeatCtx,
-		heartbeatCancel:      heartbeatCancel,
-		closeSessionTrackers: make(chan struct{}),
+		ClusterName:     "root.example.com",
+		HostID:          uuid.New().String(),
+		Context:         ctx,
+		cancel:          cancel,
+		heartbeatCtx:    heartbeatCtx,
+		heartbeatCancel: heartbeatCancel,
 	}
 
 	t.Cleanup(func() { testCtx.Close() })
@@ -255,6 +254,11 @@ func SetupTestContext(ctx context.Context, t *testing.T, cfg TestConfig) *TestCo
 	require.NoError(t, err)
 	t.Cleanup(func() { require.NoError(t, healthCheckManager.Close()) })
 
+	var authClient authclient.ClientI = client
+	if cfg.WrapAuthClient != nil {
+		authClient = cfg.WrapAuthClient(client)
+	}
+
 	// Create kubernetes service server.
 	testCtx.KubeServer, err = NewTLSServer(TLSServerConfig{
 		ForwarderConfig: ForwarderConfig{
@@ -268,7 +272,7 @@ func SetupTestContext(ctx context.Context, t *testing.T, cfg TestConfig) *TestCo
 		// directly to AuthClient solves the issue.
 		// We wrap the AuthClient with an events.TeeStreamer to send non-disk
 		// events like session.end to testCtx.emitter as well.
-		AuthClient: &fakeClient{ClientI: client, closeC: testCtx.closeSessionTrackers},
+		AuthClient: authClient,
 		// StreamEmitter is required although not used because we are using
 		// "node-sync" as session recording mode.
 		Emitter: testCtx.Emitter,
@@ -351,7 +355,7 @@ func SetupTestContext(ctx context.Context, t *testing.T, cfg TestConfig) *TestCo
 		// directly to AuthClient solves the issue.
 		// We wrap the AuthClient with an events.TeeStreamer to send non-disk
 		// events like session.end to testCtx.emitter as well.
-		AuthClient: &fakeClient{ClientI: client, closeC: testCtx.closeSessionTrackers},
+		AuthClient: authClient,
 		// StreamEmitter is required although not used because we are using
 		// "node-sync" as session recording mode.
 		Emitter: testCtx.Emitter,
diff --git a/lib/kube/proxy/watcher_test.go b/lib/kube/proxy/watcher_test.go
index 93c68e69ce9a5..5c6619f13ed0d 100644
--- a/lib/kube/proxy/watcher_test.go
+++ b/lib/kube/proxy/watcher_test.go
@@ -44,9 +44,8 @@ type mockAuthClient struct {
 	deleteCh chan string
 }
 
-func newMockAuthClient(client authclient.ClientI) *mockAuthClient {
+func newMockAuthClient() *mockAuthClient {
 	return &mockAuthClient{
-		ClientI:  client,
 		deleteCh: make(chan string, 1),
 	}
 }
@@ -70,6 +69,8 @@ func TestWatcher(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	t.Cleanup(cancel)
 	reconcileCh := make(chan types.KubeClusters)
+
+	authClient := newMockAuthClient()
 	// Setup kubernetes server that proxies one static kube cluster and
 	// watches for kube_clusters with label group=a.
 	testCtx := SetupTestContext(ctx, t, TestConfig{
@@ -86,9 +87,11 @@ func TestWatcher(t *testing.T) {
 				return
 			}
 		},
+		WrapAuthClient: func(client authclient.ClientI) authclient.ClientI {
+			authClient.ClientI = client
+			return authClient
+		},
 	})
-	authClient := newMockAuthClient(testCtx.KubeServer.AuthClient)
-	testCtx.KubeServer.AuthClient = authClient
 
 	require.Len(t, testCtx.KubeServer.fwd.kubeClusters(), 1)
 	kube0 := testCtx.KubeServer.fwd.kubeClusters()[0]