Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 14 additions & 13 deletions lib/vnet/fqdn_resolver.go
Original file line number Diff line number Diff line change
Expand Up @@ -226,11 +226,12 @@ func (r *fqdnResolver) resolveAppInfoForCluster(
}, nil
}

// VNet SSH handles SSH hostnames matching "<hostname>.<cluster_name>." or
// "<hostname>.<leaf_cluster_name>.<cluster_name>.". tryResolveSSH checks if
// fqdn matches that pattern for any logged-in cluster and if so returns a
// match. We never actually query for whether or not a matching SSH node exists,
// we just attempt to dial it when the client connects to the assigned IP.
// VNet SSH handles SSH hostnames matching "<hostname>.<cluster_name>.", where
// the <cluster_name> may be the name of a root or leaf cluster.
// tryResolveSSH checks if fqdn matches that pattern for any known cluster
// and if so returns a match. We never actually query for whether or not a
// matching SSH node exists, we just attempt to dial it when the client
// connects to the assigned IP.
func (r *fqdnResolver) tryResolveSSH(ctx context.Context, profileNames []string, fqdn string) (*vnetv1.ResolveFQDNResponse, error) {
for _, profileName := range profileNames {
log := log.With("profile", profileName)
Expand All @@ -240,23 +241,21 @@ func (r *fqdnResolver) tryResolveSSH(ctx context.Context, profileNames []string,
continue
}
rootClusterName := rootClient.ClusterName()
if !isDescendantSubdomain(fqdn, rootClusterName) {
continue
}
log = log.With("root_cluster", rootClusterName)
leafClusters, err := r.cfg.leafClusterCache.getLeafClusters(ctx, rootClient)
if err != nil {
// Good chance we're here because the user is not logged in to the profile.
log.ErrorContext(ctx, "Failed to list leaf clusters, SSH nodes in this cluster will not be resolved", "error", err)
return nil, errNoMatch
continue
}
rootDialOpts, err := r.cfg.clientApplication.GetDialOptions(ctx, profileName)
if err != nil {
log.ErrorContext(ctx, "Failed to get cluster dial options, SSH nodes in this cluster will not be resolved", "error", err)
return nil, errNoMatch
continue
}
for _, leafClusterName := range leafClusters {
log := log.With("leaf_cluster", leafClusterName)
if !isDescendantSubdomain(fqdn, leafClusterName+"."+rootClusterName) {
if !isDescendantSubdomain(fqdn, leafClusterName) {
continue
}
leafClient, err := r.cfg.clientApplication.GetCachedClient(ctx, profileName, leafClusterName)
Expand All @@ -282,8 +281,10 @@ func (r *fqdnResolver) tryResolveSSH(ctx context.Context, profileNames []string,
},
}, nil
}
// If it didn't match any leaf cluster assume it matches the root
// cluster.
// Didn't match any leaf, check if it's in the root cluster.
if !isDescendantSubdomain(fqdn, rootClusterName) {
continue
}
clusterConfig, err := r.cfg.clusterConfigCache.GetClusterConfig(ctx, rootClient)
if err != nil {
log.ErrorContext(ctx, "Failed to get VNet config, SSH nodes in this cluster will not be resolved", "error", err)
Expand Down
11 changes: 11 additions & 0 deletions lib/vnet/opensshconfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,7 @@ type sshConfigurator struct {

type sshConfiguratorConfig struct {
clientApplication ClientApplication
leafClusterCache *leafClusterCache
homePath string
clock clockwork.Clock
}
Expand Down Expand Up @@ -199,6 +200,16 @@ func (c *sshConfigurator) updateSSHConfiguration(ctx context.Context) error {
continue
}
hostMatchers = append(hostMatchers, hostMatcher(rootClient.RootClusterName()))
leafClusters, err := c.cfg.leafClusterCache.getLeafClusters(ctx, rootClient)
if err != nil {
log.WarnContext(ctx,
"Failed to list leaf clusters, not configuring VNet SSH for leaf clusters of this cluster",
"root_cluster", rootClient.ClusterName(), "error", err)
continue
}
for _, leafCluster := range leafClusters {
hostMatchers = append(hostMatchers, hostMatcher(leafCluster))
}
}
hostMatchers = utils.Deduplicate(hostMatchers)
slices.Sort(hostMatchers)
Expand Down
50 changes: 32 additions & 18 deletions lib/vnet/opensshconfig_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,23 +33,33 @@ func TestSSHConfigurator(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
clock := clockwork.NewFakeClockAt(time.Now())
homePath := t.TempDir()

// This test gives a fake clock only to the SSH configurator and a real
// clock to everything else, so that fakeClock.BlockUntilContext will
// reliably only capture the SSH configuration loop and nothing else.
fakeClock := clockwork.NewFakeClockAt(time.Now())
realClock := clockwork.NewRealClock()

fakeClientApp := newFakeClientApp(ctx, t, &fakeClientAppConfig{
clusters: map[string]testClusterSpec{
"cluster1": {},
"cluster1": {
leafClusters: map[string]testClusterSpec{
"leaf1": {},
},
},
"cluster2": {},
},
// Give the fake client app a different clock so we can rely on
// fakeClock.BlockUntilContext only capturing the SSH configuration loop.
clock: clockwork.NewRealClock(),
clock: realClock,
})
leafClusterCache, err := newLeafClusterCache(realClock)
require.NoError(t, err)

c := newSSHConfigurator(sshConfiguratorConfig{
clientApplication: fakeClientApp,
leafClusterCache: leafClusterCache,
homePath: homePath,
clock: clock,
clock: fakeClock,
})
errC := make(chan error)
go func() {
Expand Down Expand Up @@ -80,25 +90,29 @@ func TestSSHConfigurator(t *testing.T) {

// Wait until the configurator has had a chance to write the initial config
// file and then get blocked in the loop.
clock.BlockUntilContext(ctx, 1)
fakeClock.BlockUntilContext(ctx, 1)
// Assert the config file contains the root and leaf clusters reported
// by fakeClientApp.
assertConfigFile("*.cluster1 *.cluster2")
assertConfigFile("*.cluster1 *.cluster2 *.leaf1")

// Add a root cluster, wait until the configurator is blocked in the loop,
// advance the clock, wait until the configurator is blocked again
// indicating it should have updated the config and made it back into the
// loop, and then assert that the new cluster is in the config file.
fakeClientApp.cfg.clusters["cluster3"] = testClusterSpec{}
clock.BlockUntilContext(ctx, 1)
clock.Advance(sshConfigurationUpdateInterval)
clock.BlockUntilContext(ctx, 1)
assertConfigFile("*.cluster1 *.cluster2 *.cluster3")
// Add a new root and leaf cluster, wait until the configurator is blocked
// in the loop, advance the clock, wait until the configurator is blocked
// again indicating it should have updated the config and made it back into
// the loop, and then assert that the new clusters are in the config file.
fakeClientApp.cfg.clusters["cluster3"] = testClusterSpec{
leafClusters: map[string]testClusterSpec{
"leaf2": {},
},
}
fakeClock.BlockUntilContext(ctx, 1)
fakeClock.Advance(sshConfigurationUpdateInterval)
fakeClock.BlockUntilContext(ctx, 1)
assertConfigFile("*.cluster1 *.cluster2 *.cluster3 *.leaf1 *.leaf2")

// Kill the configurator, wait for it to return, and assert that the config
// file was deleted.
cancel()
require.ErrorIs(t, <-errC, context.Canceled)
_, err := os.Stat(keypaths.VNetSSHConfigPath(homePath))
_, err = os.Stat(keypaths.VNetSSHConfigPath(homePath))
require.ErrorIs(t, err, os.ErrNotExist)
}
3 changes: 1 addition & 2 deletions lib/vnet/ssh_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,14 +59,13 @@ func (h *sshHandler) handleTCPConnector(ctx context.Context, localPort uint16, c
return trace.Wrap(err)
}
defer targetConn.Close()
return trace.Wrap(h.handleTCPConnectorWithTargetConn(ctx, localPort, connector, targetConn))
return trace.Wrap(h.handleTCPConnectorWithTargetConn(ctx, connector, targetConn))
}

// handleTCPConnectorWithTargetConn handles an incoming TCP connection from
// VNet when a TCP connection to the target host has already been established.
func (h *sshHandler) handleTCPConnectorWithTargetConn(
ctx context.Context,
localPort uint16,
connector func() (net.Conn, error),
targetConn net.Conn,
) error {
Expand Down
14 changes: 6 additions & 8 deletions lib/vnet/ssh_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
package vnet

import (
"cmp"
"context"
"crypto/tls"
"crypto/x509"
Expand Down Expand Up @@ -241,18 +242,15 @@ type dialTarget struct {
}

func computeDialTarget(matchedCluster *vnetv1.MatchedCluster, fqdn string) dialTarget {
targetCluster := matchedCluster.GetRootCluster()
targetHost := strings.TrimSuffix(fqdn, "."+matchedCluster.GetRootCluster()+".")
leafCluster := matchedCluster.GetLeafCluster()
if leafCluster != "" {
targetCluster = leafCluster
targetHost = strings.TrimSuffix(targetHost, "."+leafCluster)
}
// matchedCluster.LeafCluster will be set if the host was in a leaf
// cluster, else it will be unset and the target cluster is the root.
targetCluster := cmp.Or(matchedCluster.GetLeafCluster(), matchedCluster.GetRootCluster())
targetHost := strings.TrimSuffix(fqdn, "."+fullyQualify(targetCluster))
return dialTarget{
fqdn: fqdn,
profile: matchedCluster.GetProfile(),
rootCluster: matchedCluster.GetRootCluster(),
leafCluster: leafCluster,
leafCluster: matchedCluster.GetLeafCluster(),
cluster: targetCluster,
hostname: targetHost,
addr: targetHost + ":0",
Expand Down
2 changes: 1 addition & 1 deletion lib/vnet/tcp_handler_resolver.go
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ func (h *undecidedHandler) handleTCPConnector(ctx context.Context, localPort uin
h.setDecidedHandler(sshHandler)
// Handle the incoming connection with the TCP connection to the target
// SSH node that has already been established.
return sshHandler.handleTCPConnectorWithTargetConn(ctx, localPort, connector, targetConn)
return sshHandler.handleTCPConnectorWithTargetConn(ctx, connector, targetConn)
}
return trace.Errorf("rejecting connection to %s:%d", h.cfg.fqdn, localPort)
}
Expand Down
1 change: 1 addition & 0 deletions lib/vnet/user_process.go
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ func RunUserProcess(ctx context.Context, clientApplication ClientApplication) (*
processManager, processCtx := newProcessManager()
sshConfigurator := newSSHConfigurator(sshConfiguratorConfig{
clientApplication: clientApplication,
leafClusterCache: leafClusterCache,
})
processManager.AddCriticalBackgroundTask("SSH configuration loop", func() error {
return trace.Wrap(sshConfigurator.runConfigurationLoop(processCtx))
Expand Down
54 changes: 23 additions & 31 deletions lib/vnet/vnet_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -750,20 +750,16 @@ func TestDialFakeApp(t *testing.T) {
clusters: map[string]testClusterSpec{
"root1.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1.root1.example.com"},
appSpec{publicAddr: "echo2.root1.example.com"},
appSpec{publicAddr: "echo.myzone.example.com"},
appSpec{publicAddr: "echo.nested.myzone.example.com"},
appSpec{publicAddr: "not.in.a.custom.zone"},
appSpec{
{publicAddr: "echo1.root1.example.com"},
{publicAddr: "echo2.root1.example.com"},
{publicAddr: "echo.myzone.example.com"},
{publicAddr: "echo.nested.myzone.example.com"},
{publicAddr: "not.in.a.custom.zone"},
{
publicAddr: "multi-port.root1.example.com",
tcpPorts: []*types.PortRange{
&types.PortRange{
Port: 1337,
},
&types.PortRange{
Port: 4242,
},
{Port: 1337},
{Port: 4242},
},
},
},
Expand All @@ -774,36 +770,32 @@ func TestDialFakeApp(t *testing.T) {
leafClusters: map[string]testClusterSpec{
"leaf1.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1.leaf1.example.com"},
appSpec{
{publicAddr: "echo1.leaf1.example.com"},
{
publicAddr: "multi-port.leaf1.example.com",
tcpPorts: []*types.PortRange{
&types.PortRange{
Port: 1337,
},
&types.PortRange{
Port: 4242,
},
{Port: 1337},
{Port: 4242},
},
},
},
},
"leaf2.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1.leaf2.example.com"},
{publicAddr: "echo1.leaf2.example.com"},
},
},
},
},
"root2.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1.root2.example.com"},
appSpec{publicAddr: "echo2.root2.example.com"},
{publicAddr: "echo1.root2.example.com"},
{publicAddr: "echo2.root2.example.com"},
},
leafClusters: map[string]testClusterSpec{
"leaf3.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1.leaf3.example.com"},
{publicAddr: "echo1.leaf3.example.com"},
},
},
},
Expand Down Expand Up @@ -1045,7 +1037,7 @@ func TestOnNewConnection(t *testing.T) {
clusters: map[string]testClusterSpec{
"root1.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1"},
{publicAddr: "echo1"},
},
cidrRange: "192.168.2.0/24",
leafClusters: map[string]testClusterSpec{},
Expand Down Expand Up @@ -1106,15 +1098,15 @@ func testWithAlgorithmSuite(t *testing.T, suite types.SignatureAlgorithmSuite) {
clusters: map[string]testClusterSpec{
"root.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1"},
appSpec{publicAddr: "echo2"},
{publicAddr: "echo1"},
{publicAddr: "echo2"},
},
cidrRange: "192.168.2.0/24",
leafClusters: map[string]testClusterSpec{
"leaf.example.com": {
apps: []appSpec{
appSpec{publicAddr: "echo1"},
appSpec{publicAddr: "echo2"},
{publicAddr: "echo1"},
{publicAddr: "echo2"},
},
cidrRange: "192.168.2.0/24",
},
Expand Down Expand Up @@ -1297,7 +1289,7 @@ func TestSSH(t *testing.T) {
},
{
// Connection to node in leaf cluster should work.
dialAddr: "node.leaf1.example.com.root1.example.com",
dialAddr: "node.leaf1.example.com",
dialPort: 22,
expectCIDR: leaf1CIDR,
sshUser: "testuser",
Expand All @@ -1315,7 +1307,7 @@ func TestSSH(t *testing.T) {
{
// Connection to node in leaf cluster in alternate profile should
// work.
dialAddr: "node.leaf2.example.com.root2.example.com",
dialAddr: "node.leaf2.example.com",
dialPort: 22,
expectCIDR: leaf2CIDR,
sshUser: "testuser",
Expand Down
Loading
Loading