diff --git a/.github/workflows/kube-integration-tests-non-root-bypass.yaml b/.github/workflows/kube-integration-tests-non-root-bypass.yaml
new file mode 100644
index 0000000000000..7272027bd20a0
--- /dev/null
+++ b/.github/workflows/kube-integration-tests-non-root-bypass.yaml
@@ -0,0 +1,43 @@
+# This workflow is required to ensure that the required GitHub check passes even if
+# the actual "Kube Integration Tests (Non-root)" workflow is skipped due to path filtering.
+# Otherwise it will stay pending forever.
+#
+# See "Handling skipped but required checks" for more info:
+#
+# https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks#handling-skipped-but-required-checks
+#
+# Note that both workflows must have the same name.
+
+name: Kube Integration Tests (Non-root)
+run-name: Skip Kube Integration Tests (Non-root) - ${{ github.run_id }} - @${{ github.actor }}
+
+on:
+  pull_request:
+    paths-ignore:
+      - '.github/workflows/kube-integration-tests-non-root.yaml'
+      - '**.go'
+      - 'go.mod'
+      - 'go.sum'
+      - 'build.assets/Makefile'
+      - 'build.assets/Dockerfile*'
+      - 'Makefile'
+  merge_group:
+    paths-ignore:
+      - '.github/workflows/kube-integration-tests-non-root.yaml'
+      - '**.go'
+      - 'go.mod'
+      - 'go.sum'
+      - 'build.assets/Makefile'
+      - 'build.assets/Dockerfile*'
+      - 'Makefile'
+
+jobs:
+  test:
+    name: Kube Integration Tests (Non-root)
+    runs-on: ubuntu-latest
+
+    permissions:
+      contents: none
+
+    steps:
+      - run: 'echo "No changes to verify"'
diff --git a/.github/workflows/kube-integration-tests-non-root.yaml b/.github/workflows/kube-integration-tests-non-root.yaml
new file mode 100644
index 0000000000000..2ca70b137d215
--- /dev/null
+++ b/.github/workflows/kube-integration-tests-non-root.yaml
@@ -0,0 +1,87 @@
+name: Kube Integration Tests (Non-root)
+run-name: Kube Integration Tests (Non-root) - ${{ github.run_id }} - @${{ github.actor }}
+
+on:
+  push:
+    branches:
+      - master
+      - branch/*
+  pull_request:
+    paths:
+      - '.github/workflows/kube-integration-tests-non-root.yaml'
+      - '**.go'
+      - 'go.mod'
+      - 'go.sum'
+      - 'build.assets/Makefile'
+      - 'build.assets/Dockerfile*'
+      - 'Makefile'
+  merge_group:
+    paths:
+      - '.github/workflows/kube-integration-tests-non-root.yaml'
+      - '**.go'
+      - 'go.mod'
+      - 'go.sum'
+      - 'build.assets/Makefile'
+      - 'build.assets/Dockerfile*'
+      - 'Makefile'
+
+env:
+  TEST_KUBE: true
+  KUBECONFIG: /home/.kube/config
+
+jobs:
+  test:
+    name: Kube Integration Tests (Non-root)
+    if: ${{ !startsWith(github.head_ref, 'dependabot/') }}
+    runs-on: ubuntu-22.04-16core
+
+    permissions:
+      contents: read
+      packages: read
+
+    container:
+      image: ghcr.io/gravitational/teleport-buildbox:teleport13
+      env:
+        WEBASSETS_SKIP_BUILD: 1
+      options: --cap-add=SYS_ADMIN --privileged
+
+    steps:
+      - name: Checkout Teleport
+        uses: actions/checkout@v3
+
+      - name: Prepare workspace
+        uses: ./.github/actions/prepare-workspace
+
+      - name: Chown
+        run: |
+          mkdir -p $(go env GOMODCACHE)
+          mkdir -p $(go env GOCACHE)
+          chown -Rf ci:ci ${GITHUB_WORKSPACE} $(go env GOMODCACHE) $(go env GOCACHE)
+        continue-on-error: true
+
+      - name: Create KinD cluster
+        uses: helm/kind-action@v1.5.0
+        with:
+          cluster_name: kind
+          config: fixtures/kind/config.yaml
+
+      # The tests run in the teleport-buildbox container, which isn't linked to
+      # the KinD network by default, so we can't reach the KinD control plane
+      # without connecting the two.
+      # Connecting the network allows us to access the control plane using the
+      # kind-control-plane DNS name. This step also copies the default
+      # kubeconfig and places it in /home/.kube so the ci user is able to
+      # access it.
+      - name: Link test container to KinD network
+        run: |
+          docker network connect kind $(cat /etc/hostname)
+          kubectl config set-cluster kind-kind --server=https://kind-control-plane:6443
+          kubectl cluster-info
+          kubectl apply -f fixtures/ci-teleport-rbac/ci-teleport.yaml
+          cp -r $HOME/.kube /home/
+          chown -R ci:ci /home/.kube
+
+      - name: Run tests
+        timeout-minutes: 40
+        run: |
+          runuser -u ci -g ci make rdpclient integration-kube
diff --git a/Makefile b/Makefile
index 6e830040538ad..79138955de097 100644
--- a/Makefile
+++ b/Makefile
@@ -844,6 +844,20 @@ integration: $(TEST_LOG_DIR) $(RENDER_TESTS)
 		| tee $(TEST_LOG_DIR)/integration.json \
 		| $(RENDER_TESTS) -report-by test
 
+#
+# Integration tests which require a Kubernetes cluster in order to complete successfully
+# are run separately to all other integration tests.
+#
+INTEGRATION_KUBE_REGEX := TestKube.*
+.PHONY: integration-kube
+integration-kube: FLAGS ?= -v -race
+integration-kube: PACKAGES = $(shell go list ./... | grep 'integration\([^s]\|$$\)')
+integration-kube: $(TEST_LOG_DIR) $(RENDER_TESTS)
+	@echo KUBECONFIG is: $(KUBECONFIG), TEST_KUBE: $(TEST_KUBE)
+	$(CGOFLAG) go test -json -run "$(INTEGRATION_KUBE_REGEX)" $(PACKAGES) $(FLAGS) \
+		| tee $(TEST_LOG_DIR)/integration-kube.json \
+		| $(RENDER_TESTS) -report-by test
+
 #
 # Integration tests which need to be run as root in order to complete successfully
 # are run separately to all other integration tests. Need a TTY to work.
diff --git a/build.assets/Makefile b/build.assets/Makefile
index 1e9c056d9bd6b..d3aaa380b0862 100644
--- a/build.assets/Makefile
+++ b/build.assets/Makefile
@@ -328,6 +328,11 @@ integration-root: buildbox
 	docker run $(DOCKERFLAGS) -t $(BUILDBOX) \
 		/bin/bash -c "make -C $(SRCDIR) FLAGS='-cover' integration-root"
 
+.PHONY: integration-kube
+integration-kube: buildbox
+	docker run $(DOCKERFLAGS) -t $(BUILDBOX) \
+		/bin/bash -c "make -C $(SRCDIR) FLAGS='-cover' integration-kube"
+
 #
 # Runs linters on new changes inside a build container.
 #
diff --git a/fixtures/kind/config.yaml b/fixtures/kind/config.yaml
new file mode 100644
index 0000000000000..530a99fd7d109
--- /dev/null
+++ b/fixtures/kind/config.yaml
@@ -0,0 +1,6 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  apiServerAddress: '127.0.0.1'
+  apiServerPort: 6443
+
diff --git a/go.mod b/go.mod
index b0e84e06d2af4..68c6ca17fcb28 100644
--- a/go.mod
+++ b/go.mod
@@ -387,6 +387,9 @@ replace (
 	github.com/julienschmidt/httprouter => github.com/gravitational/httprouter v1.3.1-0.20220408074523-c876c5e705a5
 	github.com/keys-pub/go-libfido2 => github.com/gravitational/go-libfido2 v1.5.3-0.20230202181331-c71192ef1c8a
 	github.com/microsoft/go-mssqldb => github.com/gravitational/go-mssqldb v0.11.1-0.20230331180905-0f76f1751cd3
+	// Replace github.com/moby/spdystream until https://github.com/moby/spdystream/pull/91 merges and deps are updated;
+	// otherwise tests fail with a detected data race.
+	github.com/moby/spdystream => github.com/gravitational/spdystream v0.0.0-20230512133543-4e46862ca9bf
 	github.com/sirupsen/logrus => github.com/gravitational/logrus v1.4.4-0.20210817004754-047e20245621
 	github.com/vulcand/predicate => github.com/gravitational/predicate v1.3.0
 	// Use our internal crypto fork, to work around the issue with OpenSSH <= 7.6 mentioned here: https://github.com/golang/go/issues/53391
diff --git a/go.sum b/go.sum
index 3faa6b6aedb58..2fe57b98b6f10 100644
--- a/go.sum
+++ b/go.sum
@@ -772,6 +772,8 @@ github.com/gravitational/redis/v9 v9.0.0-teleport.3 h1:Eg/j3jiNUZ558KDXOqzF682EF
 github.com/gravitational/redis/v9 v9.0.0-teleport.3/go.mod h1:8et+z03j0l8N+DvsVnclzjf3Dl/pFHgRk+2Ct1qw66A=
 github.com/gravitational/roundtrip v1.0.2 h1:eOCY0NEKKaB0ksJmvhO6lPMFz1pIIef+vyPBTBROQ5c=
 github.com/gravitational/roundtrip v1.0.2/go.mod h1:fuI1booM2hLRA/B/m5MRAPOU6mBZNYcNycono2UuTw0=
+github.com/gravitational/spdystream v0.0.0-20230512133543-4e46862ca9bf h1:aXnqDSit8L1qhI0+QdbJh+MTUFKXG7qbkZXnfr7L96A=
+github.com/gravitational/spdystream v0.0.0-20230512133543-4e46862ca9bf/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/gravitational/trace v1.2.1 h1:Iaf43aqbKV5H8bdiRs1qByjEHgAfADJ0lt0JwRyu+q8=
 github.com/gravitational/trace v1.2.1/go.mod h1:n0ijrq6psJY0sOI/NzLp+xdd8xl79jjwzVOFHDY6+kQ=
 github.com/gravitational/ttlmap v0.0.0-20171116003245-91fd36b9004c h1:C2iWDiod8vQ3YnOiCdMP9qYeg2UifQ8KSk36r0NswSE=
@@ -1070,8 +1072,6 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
 github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
diff --git a/integration/kube_integration_test.go b/integration/kube_integration_test.go
index 442db7f33e89c..473a9b4808f37 100644
--- a/integration/kube_integration_test.go
+++ b/integration/kube_integration_test.go
@@ -34,6 +34,7 @@ import (
 	"github.com/gravitational/trace"
 	log "github.com/sirupsen/logrus"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/net/http2"
 	v1 "k8s.io/api/core/v1"
@@ -89,12 +90,10 @@ func newKubeSuite(t *testing.T) *KubeSuite {
 	if ok, _ := strconv.ParseBool(testEnabled); !ok {
 		t.Skip("Skipping Kubernetes test suite.")
 	}
-
 	suite := &KubeSuite{
 		kubeConfigPath: os.Getenv(teleport.EnvKubeConfig),
 	}
 	require.NotEmpty(t, suite.kubeConfigPath, "This test requires path to valid kubeconfig.")
-
 	var err error
 	suite.priv, suite.pub, err = testauthority.New().GenerateKeyPair()
 	require.NoError(t, err)
@@ -131,7 +130,14 @@ func newKubeSuite(t *testing.T) *KubeSuite {
 	if err != nil {
 		require.True(t, errors.IsAlreadyExists(err), "Failed to create test pod: %v", err)
 	}
-
+	// Wait for pod to be running.
+	require.Eventually(t, func() bool {
+		rsp, err := suite.CoreV1().Pods(testNamespace).Get(context.Background(), testPod, metav1.GetOptions{})
+		if err != nil {
+			return false
+		}
+		return rsp.Status.Phase == v1.PodRunning
+	}, 60*time.Second, time.Millisecond*500)
 	return suite
 }
@@ -180,6 +186,9 @@ func testExec(t *testing.T, suite *KubeSuite, pinnedIP string, clientError strin
 			Logins:     []string{username},
 			KubeGroups: kubeGroups,
 			KubeUsers:  kubeUsers,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: {types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -231,20 +240,20 @@ func testExec(t *testing.T, suite *KubeSuite, pinnedIP string, clientError strin
 			Groups: role.GetKubeGroups(types.Allow),
 		},
 	})
+	require.NoError(t, err)
+
+	_, err = scopedProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
 	if clientError != "" {
 		require.ErrorContains(t, err, clientError)
 		return
 	}
-	require.NoError(t, err)
-
-	_, err = scopedProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
-	require.NoError(t, err)
 
 	// set up kube configuration using proxy
 	proxyClient, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
 		T:          teleport,
 		Username:   username,
 		KubeUsers:  kubeUsers,
+		PinnedIP:   pinnedIP,
 		KubeGroups: kubeGroups,
 	})
 	require.NoError(t, err)
@@ -448,6 +457,9 @@ func testKubePortForward(t *testing.T, suite *KubeSuite) {
 		Allow: types.RoleConditions{
 			Logins:     []string{username},
 			KubeGroups: kubeGroups,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -486,7 +498,7 @@ func testKubePortForward(t *testing.T, suite *KubeSuite) {
 	forwarderCh := make(chan error)
 	go func() { forwarderCh <- forwarder.ForwardPorts() }()
 	defer func() {
-		require.NoError(t, <-forwarderCh, "Forward ports exited with error")
+		assert.NoError(t, <-forwarderCh, "Forward ports exited with error")
 	}()
 
 	select {
@@ -549,6 +561,9 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
 		Allow: types.RoleConditions{
 			Logins:     []string{username},
 			KubeGroups: mainKubeGroups,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -584,7 +599,7 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
 	// connect aux cluster to main cluster
 	// using trusted clusters, so remote user will be allowed to assume
 	// role specified by mapping remote role "aux-kube" to local role "main-kube"
-	auxKubeGroups := []string{teleport.TraitInternalKubeGroupsVariable}
+	auxKubeGroups := []string{kube.TestImpersonationGroup}
 	auxRole, err := types.NewRole("aux-kube", types.RoleSpecV6{
 		Allow: types.RoleConditions{
 			Logins: []string{username},
@@ -592,6 +607,9 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
 			// to the remote cluster, and remote cluster
 			// can choose to use them by using special variable
 			KubeGroups: auxKubeGroups,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -644,6 +662,14 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
 	require.Eventually(t,
 		helpers.WaitForClusters(aux.Tunnel, 1), 10*time.Second, 1*time.Second,
 		"Two clusters do not see each other: tunnels are not working.")
+	require.Eventually(t, func() bool {
+		tc, err := main.Process.GetAuthServer().GetRemoteCluster(aux.Secrets.SiteName)
+		if err != nil {
+			return false
+		}
+		return tc.GetConnectionStatus() == teleport.RemoteClusterStatusOnline
+	}, 60*time.Second, 1*time.Second, "Main cluster does not see aux cluster as connected")
+
 	// impersonating client requests will be denied
 	impersonatingProxyClient, impersonatingProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
 		T: main,
@@ -783,7 +809,6 @@ loop:
 	// This request should be denied
 	err = impersonatingForwarder.ForwardPorts()
 	require.Error(t, err)
-	require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
 }
 
 // TestKubeTrustedClustersSNI tests scenario with trusted clusters
@@ -810,6 +835,9 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) {
 		Allow: types.RoleConditions{
 			Logins:     []string{username},
 			KubeGroups: mainKubeGroups,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -849,10 +877,13 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) {
 	// connect aux cluster to main cluster
 	// using trusted clusters, so remote user will be allowed to assume
 	// role specified by mapping remote role "aux-kube" to local role "main-kube"
-	auxKubeGroups := []string{teleport.TraitInternalKubeGroupsVariable}
+	auxKubeGroups := []string{kube.TestImpersonationGroup}
 	auxRole, err := types.NewRole("aux-kube", types.RoleSpecV6{
 		Allow: types.RoleConditions{
 			Logins: []string{username},
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			// Note that main cluster can pass it's kubernetes groups
 			// to the remote cluster, and remote cluster
 			// can choose to use them by using special variable
@@ -909,6 +940,14 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) {
 	require.Eventually(t,
 		helpers.WaitForClusters(aux.Tunnel, 1), 10*time.Second, 1*time.Second,
 		"Two clusters do not see each other: tunnels are not working.")
+	require.Eventually(t, func() bool {
+		tc, err := main.Process.GetAuthServer().GetRemoteCluster(aux.Secrets.SiteName)
+		if err != nil {
+			return false
+		}
+		return tc.GetConnectionStatus() == teleport.RemoteClusterStatusOnline
+	}, 60*time.Second, 1*time.Second, "Main cluster does not see aux cluster as connected")
+
 	// impersonating client requests will be denied
 	impersonatingProxyClient, impersonatingProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
 		T: main,
@@ -1100,6 +1139,9 @@ func runKubeDisconnectTest(t *testing.T, suite *KubeSuite, tc disconnectTestCase
 		Allow: types.RoleConditions{
 			Logins:     []string{username},
 			KubeGroups: kubeGroups,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -1190,6 +1232,9 @@ func testKubeTransportProtocol(t *testing.T, suite *KubeSuite) {
 		Allow: types.RoleConditions{
 			Logins:     []string{username},
 			KubeGroups: kubeGroups,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -1325,7 +1370,7 @@ func kubeProxyTLSConfig(cfg kube.ProxyConfig) (*tls.Config, error) {
 		return nil, trace.Wrap(err)
 	}
 
-	tlsConfig.RootCAs = &x509.CertPool{}
+	tlsConfig.RootCAs = x509.NewCertPool()
 	tlsConfig.RootCAs.AddCert(caCert)
 	tlsConfig.Certificates = []tls.Certificate{cert}
 	tlsConfig.ServerName = kubeConfig.TLSClientConfig.ServerName
@@ -1463,19 +1508,12 @@ func kubeExec(kubeConfig *rest.Config, args kubeExecArgs) error {
 	return executor.StreamWithContext(context.Background(), opts)
 }
 
-func kubeJoin(kubeConfig kube.ProxyConfig, tc *client.TeleportClient, sessionID string) (*client.KubeSession, error) {
+func kubeJoin(kubeConfig kube.ProxyConfig, tc *client.TeleportClient, meta types.SessionTracker) (*client.KubeSession, error) {
 	tlsConfig, err := kubeProxyTLSConfig(kubeConfig)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
 
-	meta, err := types.NewSessionTracker(types.SessionTrackerSpecV1{
-		SessionID: sessionID,
-	})
-	if err != nil {
-		return nil, trace.Wrap(err)
-	}
-
 	sess, err := client.NewKubeSession(context.TODO(), tc, meta, kubeConfig.T.Config.Proxy.Kube.ListenAddr.Addr, "", types.SessionPeerMode, tlsConfig)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
@@ -1506,6 +1544,9 @@ func testKubeJoin(t *testing.T, suite *KubeSuite) {
 			Logins:     []string{hostUsername},
 			KubeGroups: kubeGroups,
 			KubeUsers:  kubeUsers,
+			KubernetesLabels: types.Labels{
+				types.Wildcard: []string{types.Wildcard},
+			},
 			KubernetesResources: []types.KubernetesResource{
 				{
 					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard,
@@ -1571,12 +1612,28 @@ func testKubeJoin(t *testing.T, suite *KubeSuite) {
 	// We need to wait for the exec request to be handled here for the session to be
 	// created. Sadly though the k8s API doesn't give us much indication of when that is.
-	time.Sleep(time.Second * 5)
+	var session types.SessionTracker
+	require.Eventually(t, func() bool {
+		// We need to wait for the session to be created here. We can't use the
+		// session manager's WaitUntilExists method because it doesn't work for
+		// kubernetes sessions.
+		sessions, err := teleport.Process.GetAuthServer().GetActiveSessionTrackers(context.Background())
+		if err != nil || len(sessions) == 0 {
+			return false
+		}
+
+		session = sessions[0]
+		return true
+	}, 10*time.Second, time.Second)
 
-	participantStdinR, participantStdinW, err := os.Pipe()
-	participantStdoutR, participantStdoutW, err := os.Pipe()
+	participantStdinR, participantStdinW := io.Pipe()
+	participantStdoutR, participantStdoutW := io.Pipe()
 
-	tc, err := teleport.NewClient(helpers.ClientConfig{})
+	tc, err := teleport.NewClient(helpers.ClientConfig{
+		Login:   hostUsername,
+		Cluster: helpers.Site,
+		Host:    Host,
+	})
 	require.NoError(t, err)
 
 	tc.Stdin = participantStdinR
@@ -1587,7 +1644,7 @@ func testKubeJoin(t *testing.T, suite *KubeSuite) {
 		Username:   participantUsername,
 		KubeUsers:  kubeUsers,
 		KubeGroups: kubeGroups,
-	}, tc, "")
+	}, tc, session)
 	require.NoError(t, err)
 	defer stream.Close()
@@ -1597,16 +1654,19 @@ func testKubeJoin(t *testing.T, suite *KubeSuite) {
 	time.Sleep(time.Second * 5)
 
 	// sent a test message from the participant
-	participantStdinW.WriteString("\aecho hi2\n\r")
+	participantStdinW.Write([]byte("\aecho hi2\n\r"))
 
 	// lets type "echo hi" followed by "enter" and then "exit" + "enter":
 	term.Type("\aecho hi\n\r")
 
 	// Terminate the session after a moment to allow for the IO to reach the second client.
-	time.AfterFunc(5*time.Second, func() { term.Type("\aexit\n\r\a") })
+	time.AfterFunc(5*time.Second, func() {
+		term.Type("\aexit\n\r\a")
+		participantStdoutW.Close()
+	})
 
 	participantOutput, err := io.ReadAll(participantStdoutR)
 	require.NoError(t, err)
-	require.Contains(t, participantOutput, []byte("echo hi"))
-	require.Contains(t, out.String(), []byte("echo hi2"))
+	require.Contains(t, string(participantOutput), "echo hi")
+	require.Contains(t, out.String(), "echo hi2")
 }
diff --git a/lib/kube/proxy/forwarder.go b/lib/kube/proxy/forwarder.go
index bd345f43e5422..8a8ba0b4addf8 100644
--- a/lib/kube/proxy/forwarder.go
+++ b/lib/kube/proxy/forwarder.go
@@ -2614,7 +2614,22 @@ func (f *Forwarder) removeKubeDetails(name string) {
 // KubeProxy services or remote clusters are automatically forwarded to
 // the final destination.
 func (f *Forwarder) isLocalKubeCluster(sess *clusterSession) bool {
-	return !sess.authContext.teleportCluster.isRemote && f.cfg.KubeServiceType == KubeService
+	switch f.cfg.KubeServiceType {
+	case KubeService:
+		// Kubernetes service is always local.
+		return true
+	case LegacyProxyService:
+		// Remote clusters are always forwarded to the final destination.
+		if sess.authContext.teleportCluster.isRemote {
+			return false
+		}
+		// Legacy proxy service is local only if the kube cluster name matches
+		// one of the clusters served by this agent.
+		_, err := f.findKubeDetailsByClusterName(sess.authContext.kubeClusterName)
+		return err == nil
+	default:
+		return false
+	}
 }
 
 // listPods forwards the pod list request to the target server, captures